From 8a37072deb7cb4591f78aeeaa51266cb5f26f45b Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Thu, 28 Nov 2024 15:52:50 +0100 Subject: [PATCH 1/4] feat: release v1.28.5 ported from 1.30.0 (WIP) --- .drone.yml | 101 +- .rules/.markdown-lint.yml | 10 +- .tool-versions | 4 +- Furyfile.yaml | 31 - MAINTENANCE.md | 90 + Makefile | 20 +- README.md | 26 +- ROADMAP.md | 30 +- banners/ekscluster.md | 8 +- banners/kfddistribution.md | 8 +- banners/onpremises.md | 8 +- defaults/ekscluster-kfd-v1alpha2.yaml | 19 +- defaults/kfddistribution-kfd-v1alpha2.yaml | 17 +- defaults/onpremises-kfd-v1alpha2.yaml | 17 +- docs/network-policies/README.md | 24 + docs/network-policies/modules/auth/README.md | 16 + docs/network-policies/modules/auth/sso.md | 53 + .../modules/ingress/README.md | 35 + docs/network-policies/modules/ingress/dual.md | 33 + .../modules/ingress/single.md | 35 + .../modules/logging/README.md | 60 + docs/network-policies/modules/logging/loki.md | 52 + .../modules/logging/opensearch.md | 48 + .../modules/monitoring/README.md | 55 + .../modules/monitoring/mimir.md | 53 + .../modules/monitoring/prometheus.md | 43 + docs/network-policies/modules/opa/README.md | 36 + .../modules/opa/gatekeeper.md | 26 + docs/network-policies/modules/opa/kyverno.md | 27 + .../modules/tracing/README.md | 32 + .../network-policies/modules/tracing/tempo.md | 42 + docs/network-policies/overview.md | 36 + docs/releases/{ => legacy}/v1.1.0.md | 0 docs/releases/{ => legacy}/v1.2.0.md | 0 docs/releases/{ => legacy}/v1.3.0.md | 0 docs/releases/{ => legacy}/v1.4.0.md | 0 docs/releases/{ => legacy}/v1.5.0.md | 0 docs/releases/{ => legacy}/v1.5.1.md | 0 docs/releases/{ => legacy}/v1.6.0.md | 0 docs/releases/{ => legacy}/v1.7.0.md | 0 docs/releases/{ => legacy}/v1.7.1.md | 0 docs/releases/v1.25.10.md | 2 +- docs/releases/v1.25.8.md | 2 +- docs/releases/v1.26.0.md | 2 +- docs/releases/v1.26.1.md | 2 +- docs/releases/v1.26.2.md | 2 +- docs/releases/v1.26.3.md | 2 +- docs/releases/v1.26.5.md | 2 +- docs/releases/v1.26.6.md | 2 +- docs/releases/v1.27.0.md | 2 +- docs/releases/v1.27.1.md | 2 +- docs/releases/v1.27.2.md | 2 +- docs/releases/v1.27.3.md | 2 +- docs/releases/v1.27.4.md | 2 +- docs/releases/v1.27.5.md | 2 +- docs/releases/v1.27.6.md | 2 +- docs/releases/v1.27.7.md | 2 +- docs/releases/v1.27.8.md | 14 +- docs/releases/v1.28.0.md | 2 +- docs/releases/v1.28.1.md | 2 +- docs/releases/v1.28.2.md | 2 +- docs/releases/v1.28.3.md | 2 +- docs/releases/v1.28.5.md | 206 + docs/schemas/ekscluster-kfd-v1alpha2.md | 1176 +++- docs/schemas/kfddistribution-kfd-v1alpha2.md | 873 ++- docs/schemas/onpremises-kfd-v1alpha2.md | 1163 ++- go.mod | 8 +- go.sum | 11 +- kfd.yaml | 28 +- kustomization.yaml | 44 - pkg/apis/config/model.go | 1 + pkg/apis/config/validation_test.go | 2 - .../ekscluster/v1alpha2/private/schema.go | 6219 +++++++++-------- pkg/apis/ekscluster/v1alpha2/public/schema.go | 2423 ++++--- .../kfddistribution/v1alpha2/public/schema.go | 2578 ++++--- pkg/apis/onpremises/v1alpha2/public/schema.go | 3719 +++++----- rules/onpremises-kfd-v1alpha2.yaml | 8 + schemas/private/ekscluster-kfd-v1alpha2.json | 3877 +++++----- schemas/public/ekscluster-kfd-v1alpha2.json | 691 +- .../public/kfddistribution-kfd-v1alpha2.json | 376 +- schemas/public/onpremises-kfd-v1alpha2.json | 512 +- schemas/public/spec-plugins.json | 4 + .../config/ekscluster-kfd-v1alpha2.yaml.tpl | 14 +- .../kfddistribution-kfd-v1alpha2.yaml.tpl | 3 + .../config/onpremises-kfd-v1alpha2.yaml.tpl | 20 +- templates/distribution/_helpers.tpl | 10 +- 
.../manifests/auth/kustomization.yaml.tpl | 4 + .../auth/policies/acme-http-solver.yaml.tpl | 35 + .../manifests/auth/policies/common.yaml.tpl | 44 + .../auth/policies/kustomization.yaml.tpl | 15 + .../manifests/auth/policies/pomerium.yaml.tpl | 52 + .../auth/policies/prometheus-metrics.yaml.tpl | 31 + .../auth/resources/pomerium-policy.yml.tpl | 2 + .../manifests/auth/secrets/basic-auth.yml.tpl | 13 +- .../manifests/aws/kustomization.yaml.tpl | 3 +- .../aws/resources/snapshotclasses.yml | 8 + .../resources/{sc.yml => storageclasses.yml} | 0 .../manifests/dr/kustomization.yaml.tpl | 8 +- .../manifests/dr/patches/infra-nodes.yml.tpl | 17 + .../dr/patches/velero-schedule-full.yml.tpl | 6 +- .../patches/velero-schedule-manifests.yml.tpl | 4 +- .../resources/volumeSnapshotLocation.yaml.tpl | 16 + .../manifests/ingress/kustomization.yaml.tpl | 12 +- .../eks-ingress-nginx-external.yml.tpl | 2 +- .../eks-ingress-nginx-internal.yml.tpl | 2 +- .../ingress/patches/eks-ingress-nginx.yml.tpl | 2 +- .../ingress/patches/infra-nodes.yml.tpl | 6 +- .../cert-manager/cert-manager.yaml.tpl | 88 + .../policies/cert-manager/common.yaml.tpl | 38 + .../cert-manager/kustomization.yaml.tpl | 12 + .../cert-manager/prometheus-metrics.yaml.tpl | 30 + .../policies/ingress-nginx/common.yaml.tpl | 44 + .../ingress-nginx/external-dns.yaml.tpl | 22 + .../ingress-nginx/forecastle.yaml.tpl | 59 + .../ingress-nginx/kustomization.yaml.tpl | 14 + .../nginx-ingress-controller.yaml.tpl | 51 + .../ingress-nginx/prometheus-metrics.yaml.tpl | 33 + .../ingress/policies/kustomization.yaml.tpl | 15 + .../cert-manager-clusterissuer.yml.tpl | 8 +- .../manifests/logging/kustomization.yaml.tpl | 3 + .../logging/patches/loki-config.yaml.tpl | 15 + .../logging/patches/minio.root.env.tpl | 4 +- .../logging/policies/common.yaml.tpl | 41 + .../logging/policies/configs.yaml.tpl | 23 + .../logging/policies/fluentbit.yaml.tpl | 66 + .../logging/policies/fluentd.yaml.tpl | 73 + .../logging/policies/kustomization.yaml.tpl | 26 + .../policies/logging-operator.yaml.tpl | 22 + .../manifests/logging/policies/loki.yaml.tpl | 150 + .../manifests/logging/policies/minio.yaml.tpl | 178 + .../policies/opensearch-dashboards.yaml.tpl | 118 + .../logging/policies/opensearch.yaml.tpl | 169 + .../logging/resources/ingress-infra.yml.tpl | 2 +- .../monitoring/kustomization.yaml.tpl | 13 + .../monitoring/patches/grafana.ini.tpl | 2 +- .../monitoring/patches/infra-nodes.yml.tpl | 2 + .../monitoring/patches/minio.root.env.tpl | 4 +- .../monitoring/policies/alertmanager.yaml.tpl | 44 + .../policies/blackbox-exporter.yaml.tpl | 35 + .../monitoring/policies/common.yaml.tpl | 44 + .../monitoring/policies/grafana.yaml.tpl | 82 + .../monitoring/policies/ingress.yaml.tpl | 103 + .../policies/kube-state-metrics.yaml.tpl | 34 + .../policies/kustomization.yaml.tpl | 33 + .../monitoring/policies/mimir.yaml.tpl | 191 + .../monitoring/policies/minio.yaml.tpl | 178 + .../policies/node-exporter.yaml.tpl | 32 + .../policies/prometheus-adapter.yaml.tpl | 50 + .../policies/prometheus-operator.yaml.tpl | 32 + .../monitoring/policies/prometheus.yaml.tpl | 166 + .../policies/x509-exporter.yaml.tpl | 45 + .../resources/ingress-infra.yml.tpl | 2 +- .../prometheus-agent.yaml.tpl | 4 +- .../resources/ingress-infra.yml.tpl | 7 +- .../manifests/opa/kustomization.yaml.tpl | 4 + .../opa/policies/gatekeeper/audit.yaml.tpl | 22 + .../opa/policies/gatekeeper/common.yaml.tpl | 43 + .../gatekeeper/controller-manager.yaml.tpl | 43 + .../gatekeeper-policy-manager.yaml.tpl | 48 + 
.../gatekeeper/kustomization.yaml.tpl | 15 + .../gatekeeper/prometheus-metrics.yaml.tpl | 29 + .../opa/policies/kustomization.yaml.tpl | 16 + .../opa/policies/kyverno/common.yaml.tpl | 42 + .../policies/kyverno/kustomization.yaml.tpl | 11 + .../opa/policies/kyverno/kyverno.yaml.tpl | 117 + .../manifests/tracing/kustomization.yaml.tpl | 4 + .../tracing/patches/minio.root.env.tpl | 4 +- .../manifests/tracing/patches/tempo.yaml.tpl | 3 +- .../tracing/policies/common.yaml.tpl | 42 + .../tracing/policies/kustomization.yaml.tpl | 14 + .../manifests/tracing/policies/minio.yaml.tpl | 177 + .../manifests/tracing/policies/tempo.yaml.tpl | 255 + templates/distribution/scripts/apply.sh.tpl | 8 +- .../distribution/scripts/pre-apply.sh.tpl | 73 +- .../ekscluster/terraform/main.auto.tfvars.tpl | 7 +- .../ekscluster/terraform/main.tf.tpl | 1 + .../ekscluster/terraform/variables.tf | 24 +- .../98.cluster-certificates-renewal.yaml.tpl | 2 +- .../kubernetes/onpremises/hosts.yaml.tpl | 3 + tests/e2e-kfddistribution-upgrades.sh | 23 +- .../furyctl-init-cluster-1.28.0.yaml | 104 - .../furyctl-init-cluster-1.28.1.yaml | 104 - .../furyctl-init-cluster-1.28.2.yaml | 104 - ....yaml => furyctl-init-cluster-1.28.5.yaml} | 6 +- ...l-10-migrate-from-none-to-safe-values.yaml | 5 +- ...-kyverno-default-policies-to-disabled.yaml | 5 +- ...-from-alertmanagerconfigs-to-disabled.yaml | 5 +- .../furyctl-2-migrate-from-tempo-to-none.yaml | 5 +- ...uryctl-3-migrate-from-kyverno-to-none.yaml | 5 +- ...furyctl-4-migrate-from-velero-to-none.yaml | 5 +- .../furyctl-5-migrate-from-loki-to-none.yaml | 6 +- .../furyctl-6-migrate-from-mimir-to-none.yaml | 6 +- ...ryctl-7-migrate-from-basicAuth-to-sso.yaml | 6 +- .../furyctl-8-migrate-from-sso-to-none.yaml | 8 +- .../furyctl-9-migrate-from-nginx-to-none.yaml | 6 +- .../kfddistribution/furyctl-cleanup-all.yaml | 5 +- .../kfddistribution/furyctl-init-cluster.yaml | 7 +- .../furyctl-init-with-values-from-nil.yaml | 7 +- tests/schema.sh | 30 +- .../ekscluster-kfd-v1alpha2/001-no.yaml | 26 +- .../ekscluster-kfd-v1alpha2/001-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/002-no.yaml | 8 +- .../ekscluster-kfd-v1alpha2/002-ok.yaml | 8 +- .../ekscluster-kfd-v1alpha2/003-no.yaml | 26 +- .../ekscluster-kfd-v1alpha2/003-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/004-no.yaml | 26 +- .../ekscluster-kfd-v1alpha2/004-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/005-no.yaml | 26 +- .../ekscluster-kfd-v1alpha2/005-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/006-no.yaml | 172 +- .../ekscluster-kfd-v1alpha2/006-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/007-no.yaml | 168 +- .../ekscluster-kfd-v1alpha2/007-ok.yaml | 28 +- .../ekscluster-kfd-v1alpha2/008-no.yaml | 94 +- .../ekscluster-kfd-v1alpha2/008-ok.yaml | 86 +- .../ekscluster-kfd-v1alpha2/009-no.yaml | 90 +- .../ekscluster-kfd-v1alpha2/009-ok.yaml | 86 +- .../ekscluster-kfd-v1alpha2/010-no.yaml | 86 +- .../ekscluster-kfd-v1alpha2/010-ok.yaml | 86 +- .../ekscluster-kfd-v1alpha2/011-no.yaml | 70 +- .../ekscluster-kfd-v1alpha2/011-ok.yaml | 74 +- .../ekscluster-kfd-v1alpha2/012-no.yaml | 135 + .../ekscluster-kfd-v1alpha2/012-ok.yaml | 157 + .../ekscluster-kfd-v1alpha2/001-no.yaml | 26 +- .../ekscluster-kfd-v1alpha2/001-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/002-no.yaml | 8 +- .../ekscluster-kfd-v1alpha2/002-ok.yaml | 8 +- .../ekscluster-kfd-v1alpha2/003-no.yaml | 26 +- .../ekscluster-kfd-v1alpha2/003-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/004-no.yaml | 26 +- .../ekscluster-kfd-v1alpha2/004-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/005-no.yaml | 
26 +- .../ekscluster-kfd-v1alpha2/005-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/006-no.yaml | 172 +- .../ekscluster-kfd-v1alpha2/006-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/007-no.yaml | 168 +- .../ekscluster-kfd-v1alpha2/007-ok.yaml | 26 +- .../ekscluster-kfd-v1alpha2/008-no.yaml | 90 +- .../ekscluster-kfd-v1alpha2/008-ok.yaml | 82 +- .../ekscluster-kfd-v1alpha2/009-no.yaml | 86 +- .../ekscluster-kfd-v1alpha2/009-ok.yaml | 84 +- .../ekscluster-kfd-v1alpha2/010-no.yaml | 82 +- .../ekscluster-kfd-v1alpha2/010-ok.yaml | 82 +- .../ekscluster-kfd-v1alpha2/011-no.yaml | 72 +- .../ekscluster-kfd-v1alpha2/011-ok.yaml | 72 +- .../ekscluster-kfd-v1alpha2/012-no.yaml | 135 + .../ekscluster-kfd-v1alpha2/012-ok.yaml | 135 + 247 files changed, 20404 insertions(+), 11977 deletions(-) delete mode 100644 Furyfile.yaml create mode 100644 MAINTENANCE.md create mode 100644 docs/network-policies/README.md create mode 100644 docs/network-policies/modules/auth/README.md create mode 100644 docs/network-policies/modules/auth/sso.md create mode 100644 docs/network-policies/modules/ingress/README.md create mode 100644 docs/network-policies/modules/ingress/dual.md create mode 100644 docs/network-policies/modules/ingress/single.md create mode 100644 docs/network-policies/modules/logging/README.md create mode 100644 docs/network-policies/modules/logging/loki.md create mode 100644 docs/network-policies/modules/logging/opensearch.md create mode 100644 docs/network-policies/modules/monitoring/README.md create mode 100644 docs/network-policies/modules/monitoring/mimir.md create mode 100644 docs/network-policies/modules/monitoring/prometheus.md create mode 100644 docs/network-policies/modules/opa/README.md create mode 100644 docs/network-policies/modules/opa/gatekeeper.md create mode 100644 docs/network-policies/modules/opa/kyverno.md create mode 100644 docs/network-policies/modules/tracing/README.md create mode 100644 docs/network-policies/modules/tracing/tempo.md create mode 100644 docs/network-policies/overview.md rename docs/releases/{ => legacy}/v1.1.0.md (100%) rename docs/releases/{ => legacy}/v1.2.0.md (100%) rename docs/releases/{ => legacy}/v1.3.0.md (100%) rename docs/releases/{ => legacy}/v1.4.0.md (100%) rename docs/releases/{ => legacy}/v1.5.0.md (100%) rename docs/releases/{ => legacy}/v1.5.1.md (100%) rename docs/releases/{ => legacy}/v1.6.0.md (100%) rename docs/releases/{ => legacy}/v1.7.0.md (100%) rename docs/releases/{ => legacy}/v1.7.1.md (100%) create mode 100644 docs/releases/v1.28.5.md delete mode 100644 kustomization.yaml create mode 100644 templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/pomerium.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/aws/resources/snapshotclasses.yml rename templates/distribution/manifests/aws/resources/{sc.yml => storageclasses.yml} (100%) create mode 100644 templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl create mode 100644 
templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/configs.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/fluentd.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/loki.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/minio.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/opensearch.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/minio.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl create mode 100644 
templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl create mode 100644 templates/distribution/manifests/tracing/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/tracing/policies/minio.yaml.tpl create mode 100644 templates/distribution/manifests/tracing/policies/tempo.yaml.tpl delete mode 100644 tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.0.yaml delete mode 100644 tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.1.yaml delete mode 100644 tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.2.yaml rename tests/e2e/kfddistribution-upgrades/{furyctl-init-cluster-1.28.3.yaml => furyctl-init-cluster-1.28.5.yaml} (94%) create mode 100644 tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml create mode 100644 tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml create mode 100644 tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml create mode 100644 tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml diff --git a/.drone.yml b/.drone.yml index 4010ec38b..01e57d36a 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,13 +15,13 @@ clone: steps: - name: license-check - image: quay.io/sighup/golang:1.21.5 + image: quay.io/sighup/golang:1.23.3 pull: always commands: - make license-check - name: schema-check - image: quay.io/sighup/golang:1.21.5 + image: quay.io/sighup/golang:1.23.3 pull: always commands: - |- @@ -49,39 +49,52 @@ steps: - license-check - schema-check - - name: lint-go - image: quay.io/sighup/golang:1.21.5 - pull: always - commands: - - make lint-go - depends_on: - - license-check - - schema-check + # - name: lint-go + # image: quay.io/sighup/golang:1.23.3 + # pull: always + # commands: + # - make lint-go + # depends_on: + # - license-check + # - schema-check - name: test-schema - # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 - image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always depends_on: - license-check - schema-check + environment: + JV_VERSION: 6.0.1 commands: + # we need to download `jv` for running the JSON Schemas tests. + - curl -L https://github.com/santhosh-tekuri/jsonschema/releases/download/v$${JV_VERSION}/jv-v$${JV_VERSION}-linux-amd64.tar.gz | tar zx --directory /usr/local/bin/ - bats -t tests/schema.sh - name: render - # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 - image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always - commands: - - echo $${NETRC_FILE} > /root/.netrc - - furyctl vendor -H - - kustomize build . 
> distribution.yml - environment: - NETRC_FILE: - from_secret: NETRC_FILE depends_on: - license-check - schema-check + environment: + NETRC_FILE: + from_secret: NETRC_FILE + FURYCTL_VERSION: v0.30.0 + FURYCTL_CONFIG: tests/e2e/kfddistribution/furyctl-init-cluster.yaml + FURYCTL_DISTRO_LOCATION: ./ + FURYCTL_OUTDIR: ./ + FURYCTL_DISABLE_ANALYTICS: "true" + KUBECONFIG: ./dummy + commands: + - echo $${NETRC_FILE} > /root/.netrc + - echo "Installing furyctl version $${FURYCTL_VERSION}..." + - curl -L "https://github.com/sighupio/furyctl/releases/download/$${FURYCTL_VERSION}/furyctl-$(uname -s)-amd64.tar.gz" | tar xz -C /usr/local/bin/ + - furyctl download dependencies && furyctl dump template + # Move the folder with the manifests generated from the templates into the right path + - mv distribution $${FURYCTL_OUTDIR}.furyctl/$$(yq .metadata.name $FURYCTL_CONFIG) + # Build the whole distribution + - kustomize build $${FURYCTL_OUTDIR}.furyctl/$$(yq .metadata.name $FURYCTL_CONFIG)/distribution/manifests > distribution.yml - name: check-deprecated-apis image: us-docker.pkg.dev/fairwinds-ops/oss/pluto:v5 @@ -116,7 +129,7 @@ trigger: steps: - name: create Kind cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 pull: always volumes: - name: dockersock @@ -148,8 +161,7 @@ steps: - kind get kubeconfig --name $${CLUSTER_NAME} > $${KUBECONFIG} - name: e2e-kfddistribution - # KUBECTL_KUSTOMIZE_HELM_YQ_ISTIOCTL_FURYCTL_BATS - image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always # we need to use host network to access Kind API port that is listening on the worker's loopback # because we mount the host's Docker socket to run Kind. @@ -157,7 +169,7 @@ environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER} KUBECONFIG: /drone/src/kubeconfig - FURYCTL_VERSION: v0.29.7-rc.0 + FURYCTL_VERSION: v0.30.0 depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig @@ -175,7 +187,7 @@ steps: - tests/e2e-kfddistribution.sh - name: delete-kind-cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 volumes: - name: dockersock path: /var/run/docker.sock @@ -196,7 +208,7 @@ volumes: host: path: /var/run/docker.sock --- -name: e2e-kubernetes-1.28.0-1.28.1-1.28.2-1.28.3-1.28.4 +name: e2e-kubernetes-1.28.4-to-1.28.5 kind: pipeline type: docker @@ -220,7 +232,7 @@ trigger: steps: - name: create Kind cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 pull: always volumes: - name: dockersock @@ -252,8 +264,7 @@ steps: - kind get kubeconfig --name $${CLUSTER_NAME} > $${KUBECONFIG} - name: e2e-kfddistribution - # KUBECTL_KUSTOMIZE_HELM_YQ_ISTIOCTL_FURYCTL_BATS - image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always # we need to use host network to access Kind API port that is listening on the worker's loopback # because we mount the host's Docker socket to run Kind.
@@ -261,7 +272,7 @@ steps: environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades KUBECONFIG: /drone/src/kubeconfig-upgrades - FURYCTL_VERSION: v0.29.7-rc.0 + FURYCTL_VERSION: v0.30.0 depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig-upgrades @@ -279,7 +290,7 @@ steps: - tests/e2e-kfddistribution-upgrades.sh - name: delete-kind-cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 volumes: - name: dockersock path: /var/run/docker.sock @@ -306,7 +317,7 @@ type: docker depends_on: - e2e-kubernetes-1.28 - - e2e-kubernetes-1.28.0-1.28.1-1.28.2-1.28.3-1.28.4 + - e2e-kubernetes-1.28.4-to-1.28.5 platform: os: linux @@ -320,22 +331,6 @@ trigger: - refs/tags/**-docs* steps: - - name: prepare-release-manifests - image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1 - pull: always - depends_on: [clone] - environment: - RELEASE_MANIFESTS_PATH: fury-distribution-${DRONE_TAG}.yml - commands: - - furyctl vendor -H - - kustomize build . > $${RELEASE_MANIFESTS_PATH} - when: - ref: - include: - - refs/tags/** - exclude: - - refs/tags/**-docs* - - name: prepare-release-notes image: quay.io/sighup/fury-release-notes-plugin:3.7_2.8.4 depends_on: [clone] @@ -352,16 +347,12 @@ steps: image: plugins/github-release pull: always depends_on: - - prepare-release-manifests - prepare-release-notes settings: api_key: from_secret: github_token file_exists: skip files: - - fury-distribution-${DRONE_TAG}.yml - - Furyfile.yaml - - kustomization.yaml - kfd.yaml prerelease: true overwrite: true @@ -381,16 +372,12 @@ steps: image: plugins/github-release pull: always depends_on: - - prepare-release-manifests - prepare-release-notes settings: api_key: from_secret: github_token file_exists: skip files: - - fury-distribution-${DRONE_TAG}.yml - - Furyfile.yaml - - kustomization.yaml - kfd.yaml prerelease: false overwrite: true diff --git a/.rules/.markdown-lint.yml b/.rules/.markdown-lint.yml index d2ae0d5b6..c1712eda5 100644 --- a/.rules/.markdown-lint.yml +++ b/.rules/.markdown-lint.yml @@ -22,12 +22,12 @@ ############### # Rules by id # ############### -MD004: false # Unordered list style +MD004: false # Unordered list style MD007: - indent: 2 # Unordered list indentation + indent: 2 # Unordered list indentation MD013: - line_length: 808 # Line length -MD024: false # Multiple headers with the same content + line_length: 808 # Line length +MD024: false # Multiple headers with the same content MD026: punctuation: ".,;:!。,;:" # List of not allowed MD029: false # Ordered list item prefix @@ -39,4 +39,4 @@ MD041: false ################# # Rules by tags # ################# -blank_lines: false # Error on blank lines +blank_lines: false # Error on blank lines diff --git a/.tool-versions b/.tool-versions index fdaa990a2..65e31e4a8 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,7 +1,7 @@ bats 1.9.0 drone 1.7.0 -golang 1.21.5 -golangci-lint 1.55.2 +golang 1.23.3 +golangci-lint 1.62.0 yq 4.33.3 jq 1.6 make 4.4.1 diff --git a/Furyfile.yaml b/Furyfile.yaml deleted file mode 100644 index 7d1557fc1..000000000 --- a/Furyfile.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2022 SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
-
----
-versions:
-  auth: v0.3.0
-  aws: v4.2.0
-  dr: v2.3.0
-  ingress: v2.3.3
-  logging: v3.4.1
-  monitoring: v3.2.0
-  opa: v1.12.0
-  networking: v1.17.0
-  tracing: v1.0.3
-
-bases:
-  - name: auth
-  - name: aws
-  - name: dr
-  - name: ingress
-  - name: logging
-  - name: monitoring
-  - name: networking
-  - name: opa
-  - name: tracing
-
-modules:
-  - name: aws
-  - name: dr
-  - name: ingress
diff --git a/MAINTENANCE.md b/MAINTENANCE.md
new file mode 100644
index 000000000..7b74b2938
--- /dev/null
+++ b/MAINTENANCE.md
@@ -0,0 +1,90 @@
+# Kubernetes Fury Distribution Maintenance Guide
+
+In this document you can find the steps needed to cut a new release of KFD.
+
+Some things to know before starting:
+
+- We maintain the latest 3 "minor" versions of KFD, so when you release a new version, you usually need to actually release 3 new versions. See the [versioning](docs/VERSIONING.md) file for more details if you are not familiar with KFD's versioning.
+- Each release of KFD is tightly coupled with a release of `furyctl`. So you'll need to be able to update furyctl too, or ask for help from somebody that can.
+
+Usually, a new release of KFD is triggered by one of these events:
+
+- One or more core modules have been updated (new versions have been released); this could be a bug fix or a simple version bump to add new features.
+- A new version with a bug fix or new features of one or more of the installers (on-premises, EKS, etc.) has been released.
+- A new feature or a bug fix has been introduced into the template files of the distribution.
+- A new release of Kubernetes is out and must be supported (usually triggers all 3 previous points).
+
+The release is needed to make these updates available to KFD's user base.
+
+## Process
+
+The update process usually involves going back and forth between KFD (this repo) and furyctl.
+
+> [!NOTE]
+> Some of the following steps may not apply in some specific cases; for example, if you are only releasing a patch version that fixes an issue in the templates, you may be able to skip some steps.
+
+Without further ado, the steps to release a new version are:
+
+### fury-distribution
+
+> [!WARNING]
+> If you are releasing a new `x.y.0` version, create a `release-vx.y` branch for the previous release.
+
+1. Create a new branch `feat/vx.y.z` (`v1.29.4`, for example) to work on.
+2. Create the PRs fixing the issues or adding new features to the templates or other files of fury-distribution, test them and merge them.
+3. Update the `kfd.yaml` and `Furyfile.yaml` files, bumping the distribution version and adjusting the modules and installers versions where needed.
+4. If the distribution schemas have been changed:
+   1. If you haven't already, install the needed tools with `make tools-go`.
+   2. Generate the new docs with `make generate-docs`.
+   3. Generate the Go models with `make generate-go-models`.
+5. Update the CI and e2e tests to point to the new version:
+   1. `.drone.yml`
+   2. `tests/e2e-kfddistribution-*.yaml`
+   3. `tests/e2e-kfddistribution-upgrades.sh`
+   4. `tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml`
+6. Update the documentation:
+   1. `README.md`
+   2. `docs/COMPATIBILITY_MATRIX.md`
+   3. `docs/VERSIONING.md`
+   4. Write the release notes for the new version (`docs/releases/vx.y.z.md`)
+7. Tag a release candidate to trigger all the e2e tests and fix any problems that arise.
+
+At this point, you'll need to switch to furyctl and push some changes there.
+
+### furyctl
+
+8. Create a new branch for the WIP release like `feat/vx.y.z` (`v0.29.8`, for example).
+9. Add the new versions to the `internal/distribution/compatibility.go` file.
+10. Add the migration paths to the corresponding kinds in `configs/upgrades/{onpremises,kfddistribution,ekscluster}/`, creating the needed folders for each new version.
+11. Update the documentation:
+    1. `README.md`.
+    2. `docs/COMPATIBILITY_MATRIX.md`.
+12. Update the compatibility unit tests with the new versions (`internal/distribution/compatibility_test.go`).
+13. Bump the `fury-distribution` Go library to the new version that was released as an RC in step `7`.
+
+```bash
+go get -u github.com/sighupio/fury-distribution@v1.29.4
+go mod tidy
+```
+
+14. Tag a release candidate with the changes. This will be used in the e2e tests of the distribution.
+
+### Back to fury-distribution
+
+15. Update the CI's `.drone.yml` file to use the furyctl release candidate that you tagged in step `14`.
+16. Update the e2e tests with the new upgrade paths.
+17. Tag a new release candidate of the distribution to run the e2e tests using the new upgrade paths and furyctl's RC.
+18. After the CI passes and the PR has been approved, merge into `main`.
+19. Tag the final release and let the CI run again and do the release.
+20. **Repeat the whole process for the other 2 "minor" versions that need to be updated**, but targeting `release-vx.y` branches instead of `main`.
+
+### Back to furyctl
+
+21. Once the new KFD releases are live and the PR updating furyctl has been approved, merge it and tag the final release.
+
+### Other changes
+
+After the releases of the distribution and furyctl are done, there are some other places that need to be updated to reflect the new releases, in no particular order:
+
+1. Update the quick-start guides in https://github.com/sighupio/fury-getting-started/
+2.
Update KFD's documentation site with the new versions https://github.com/sighupio/kfd-docs/ diff --git a/Makefile b/Makefile index 51ad403b4..2ed6b4009 100644 --- a/Makefile +++ b/Makefile @@ -52,10 +52,10 @@ lint-go: .PHONY: tools-go tools-go: - @go install github.com/evanphx/json-patch/cmd/json-patch@v5.6.0 + @go install github.com/evanphx/json-patch/v5/cmd/json-patch@v5.9.0 @go install github.com/google/addlicense@v1.1.1 - @go install mvdan.cc/gofumpt@v0.5.0 - @go install golang.org/x/tools/cmd/goimports@v0.9.3 + @go install mvdan.cc/gofumpt@v0.7.0 + @go install golang.org/x/tools/cmd/goimports@v0.26.0 @go install github.com/daixiang0/gci@v0.10.1 @go install github.com/momaek/formattag@v0.0.9 @go install github.com/santhosh-tekuri/jsonschema/cmd/jv@v0.4.0 @@ -94,6 +94,20 @@ generate-docs: @md-gen gen --input schemas/public/kfddistribution-kfd-v1alpha2.json --output docs/schemas/kfddistribution-kfd-v1alpha2.md --overwrite --banner banners/kfddistribution.md @md-gen gen --input schemas/public/ekscluster-kfd-v1alpha2.json --output docs/schemas/ekscluster-kfd-v1alpha2.md --overwrite --banner banners/ekscluster.md +.PHONY: generate-np-diagrams +generate-np-diagrams: + docker run --rm -v $(PWD)/docs/network-policies:/workdir minlag/mermaid-cli:latest -i "/workdir/overview.md" -o "/workdir/overview.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/auth:/workdir minlag/mermaid-cli:latest -i "/workdir/sso.md" -o "/workdir/sso.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/ingress:/workdir minlag/mermaid-cli:latest -i "/workdir/single.md" -o "/workdir/single.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/ingress:/workdir minlag/mermaid-cli:latest -i "/workdir/dual.md" -o "/workdir/dual.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/logging:/workdir minlag/mermaid-cli:latest -i "/workdir/loki.md" -o "/workdir/loki.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/logging:/workdir minlag/mermaid-cli:latest -i "/workdir/opensearch.md" -o "/workdir/opensearch.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/monitoring:/workdir minlag/mermaid-cli:latest -i "/workdir/mimir.md" -o "/workdir/mimir.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/monitoring:/workdir minlag/mermaid-cli:latest -i "/workdir/prometheus.md" -o "/workdir/prometheus.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/opa:/workdir minlag/mermaid-cli:latest -i "/workdir/gatekeeper.md" -o "/workdir/gatekeeper.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/opa:/workdir minlag/mermaid-cli:latest -i "/workdir/kyverno.md" -o "/workdir/kyverno.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/tracing:/workdir minlag/mermaid-cli:latest -i "/workdir/tempo.md" -o "/workdir/tempo.png" -w 2048 -H 1536 -b white + .PHONY: dump-private-schema dump-private-schema: @cat schemas/public/ekscluster-kfd-v1alpha2.json | \ diff --git a/README.md b/README.md index 5a52f4902..67b597db7 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@

Kubernetes Fury Distribution (KFD) is a certified battle-tested Kubernetes distribution based purely on upstream Kubernetes.

-[![Build Status](http://ci.sighup.io/api/badges/sighupio/fury-distribution/status.svg?ref=refs/tags/v1.28.4)](http://ci.sighup.io/sighupio/fury-distribution) -[![Release](https://img.shields.io/badge/release-v1.28.4-blue?label=FuryDistributionRelease)](https://github.com/sighupio/fury-distribution/releases/latest) +[![Build Status](http://ci.sighup.io/api/badges/sighupio/fury-distribution/status.svg?ref=refs/tags/v1.28.5)](http://ci.sighup.io/sighupio/fury-distribution) +[![Release](https://img.shields.io/badge/release-v1.28.5-blue?label=FuryDistributionRelease)](https://github.com/sighupio/fury-distribution/releases/latest) [![Slack](https://img.shields.io/badge/slack-@kubernetes/fury-yellow.svg?logo=slack)](https://kubernetes.slack.com/archives/C0154HYTAQH) [![License](https://img.shields.io/github/license/sighupio/fury-distribution)](https://github.com/sighupio/fury-distribution/blob/main/LICENSE) @@ -130,9 +130,9 @@ Current supported versions of KFD are: | KFD Version | Kubernetes Version | | :----------------------------------------------------------------------------: | :----------------: | -| [`1.29.3`](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.3) | `1.29.x` | -| [`1.28.4`](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.3) | `1.28.x` | -| [`1.27.8`](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.8) | `1.27.x` | +| [`1.30.0`](https://github.com/sighupio/fury-distribution/releases/tag/v1.30.0) | `1.30.x` | +| [`1.29.5`](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.5) | `1.29.x` | +| [`1.28.5`](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.5) | `1.28.x` | Check the [compatibility matrix][compatibility-matrix] for additional information about previous releases of the Distribution and the compatibility with `furyctl`. 
@@ -174,14 +174,14 @@ KFD is open-source software and it's released under the following [LICENSE](LICE [dr-module]: https://github.com/sighupio/fury-kubernetes-dr [opa-module]: https://github.com/sighupio/fury-kubernetes-opa [auth-module]: https://github.com/sighupio/fury-kubernetes-auth -[networking-version]: https://img.shields.io/badge/release-v1.17.0-blue -[ingress-version]: https://img.shields.io/badge/release-v2.3.3-blue -[logging-version]: https://img.shields.io/badge/release-v3.4.1-blue -[monitoring-version]: https://img.shields.io/badge/release-v3.2.0-blue -[tracing-version]: https://img.shields.io/badge/release-v1.0.3-blue -[dr-version]: https://img.shields.io/badge/release-v2.3.0-blue -[opa-version]: https://img.shields.io/badge/release-v1.12.0-blue -[auth-version]: https://img.shields.io/badge/release-v0.3.0-blue +[networking-version]: https://img.shields.io/badge/release-v2.0.0-blue +[ingress-version]: https://img.shields.io/badge/release-v3.0.1-blue +[logging-version]: https://img.shields.io/badge/release-v4.0.0-blue +[monitoring-version]: https://img.shields.io/badge/release-v3.3.0-blue +[tracing-version]: https://img.shields.io/badge/release-v1.1.0-blue +[dr-version]: https://img.shields.io/badge/release-v3.0.0-blue +[opa-version]: https://img.shields.io/badge/release-v1.13.0-blue +[auth-version]: https://img.shields.io/badge/release-v0.4.0-blue diff --git a/ROADMAP.md b/ROADMAP.md index 54ef986bc..f5e0fb0a1 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -6,21 +6,27 @@ From 2024, development will focus on hardening the distribution security-wise, p ## Q1 2024 -- KFD 1.28.x release and release of the latest supported patch version for 1.27.x and 1.26.x, drop support for 1.25.x -- furyctl 0.28.x release -- Feature: Additional encryption parameters for ETCD on the OnPremises provider -- New project release: Gangplank, a forked and updated version of Gangway +- [x] KFD 1.28.x release and release of the latest supported patch version for 1.27.x and 1.26.x, drop support for 1.25.x +- [x] furyctl 0.28.x release +- [x] Feature: Additional encryption parameters for ETCD on the OnPremises provider +- [x] New project release: Gangplank, a forked and updated version of Gangway ## Q2 2024 -- KFD 1.29.x release and release of the latest supported patch version for 1.28.x and 1.27.x, drop support for 1.26.x -- furyctl 0.29.x release -- Feature: Improved hardening for all the images used in the KFD distribution by default -- Feature: Improved network policies for the KFD infrastructural components +- [x] KFD 1.29.x release and release of the latest supported patch version for 1.28.x and 1.27.x, drop support for 1.26.x +- [x] furyctl 0.29.x release +- [ ] Feature: Improved hardening for all the images used in the KFD distribution by default +- [ ] Feature: Improved network policies for the KFD infrastructural components ## H2 2024 -- KFD 1.30.x release and release of the latest supported patch version for 1.29.x and 1.28.x, drop support for 1.27.x -- furyctl 0.30.x release -- Feature: Add support for secured container runtimes -- Feature: Track dependencies provenance and dependencies signing +- [x] KFD 1.30.x release and release of the latest supported patch version for 1.29.x and 1.28.x, drop support for 1.27.x +- [x] furyctl 0.30.x release +- [ ] Feature: Add support for secured container runtimes +- [ ] Feature: Track dependencies provenance and dependencies signing +- [x] (from Q2 2024) Feature: Optional selection of improved hardened images used in the KFD distribution installation +- 
[x] (from Q2 2024) Feature: Experimental network policies for the KFD infrastructural components on the OnPremises provider +- [ ] KFD 1.31.x release +- [ ] furyctl 0.31.x release + + diff --git a/banners/ekscluster.md b/banners/ekscluster.md index a66d70188..873a47e0c 100644 --- a/banners/ekscluster.md +++ b/banners/ekscluster.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/kfddistribution.md b/banners/kfddistribution.md index a44f13847..797d2678f 100644 --- a/banners/kfddistribution.md +++ b/banners/kfddistribution.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/onpremises.md b/banners/onpremises.md index a8d8983dd..7f05c77c8 100644 --- a/banners/onpremises.md +++ b/banners/onpremises.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. 
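For reference, the `furyctl create config` commands shown in the banners generate a `furyctl.yaml` skeleton. Below is a minimal sketch of the top of such a file; this is an assumption based on the `kfd-v1alpha2` schemas shipped in this repository (e.g. `schemas/public/onpremises-kfd-v1alpha2.json`), not verbatim furyctl output — the real generated file contains the full `spec` tree for the chosen kind:

```yaml
apiVersion: kfd.sighup.io/v1alpha2 # assumption: standard KFD API group
kind: OnPremises
metadata:
  name: example-cluster
spec:
  distributionVersion: v1.29.4 # must match the --version flag used above
  # ... the rest of the spec follows the onpremises-kfd-v1alpha2 schema
```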
diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 4a2ae7083..84d72930e 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -15,7 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: eks - + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration @@ -47,7 +47,7 @@ data: name: "" create: true # internal field, should be either the VPC ID taken from the kubernetes - # phase or the ID of the created VPC in the Ifra phase + # phase or the ID of the created VPC in the Infra phase vpcId: "" # common configuration for nginx ingress controller nginx: @@ -229,7 +229,7 @@ data: kyverno: # this configuration adds namespaces to the excluded list, actually whitelisting them additionalExcludedNamespaces: [] - validationFailureAction: enforce + validationFailureAction: Enforce installDefaultPolicies: true # dr module configuration dr: @@ -253,9 +253,16 @@ data: bucketName: velerobucket schedules: install: true - cron: - manifests: "" - full: "" + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false + snapshotController: + install: false # auth module configuration auth: overrides: diff --git a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml index 9bcbb3ff8..c943ae1df 100644 --- a/defaults/kfddistribution-kfd-v1alpha2.yaml +++ b/defaults/kfddistribution-kfd-v1alpha2.yaml @@ -15,7 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: none - + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration @@ -221,7 +221,7 @@ data: kyverno: # this configuration adds namespaces to the excluded list, actually whitelisting them additionalExcludedNamespaces: [] - validationFailureAction: enforce + validationFailureAction: Enforce installDefaultPolicies: true # dr module configuration dr: @@ -240,9 +240,16 @@ data: bucketName: velerobucket schedules: install: true - cron: - manifests: "" - full: "" + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false + snapshotController: + install: false # auth module configuration auth: overrides: diff --git a/defaults/onpremises-kfd-v1alpha2.yaml b/defaults/onpremises-kfd-v1alpha2.yaml index afb37d0d2..d2717c3ba 100644 --- a/defaults/onpremises-kfd-v1alpha2.yaml +++ b/defaults/onpremises-kfd-v1alpha2.yaml @@ -15,7 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: none - + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration @@ -221,7 +221,7 @@ data: kyverno: # this configuration adds namespaces to the excluded list, actually whitelisting them additionalExcludedNamespaces: [] - validationFailureAction: enforce + validationFailureAction: Enforce installDefaultPolicies: true # dr module configuration dr: @@ -240,9 +240,16 @@ data: bucketName: velerobucket schedules: install: true - cron: - manifests: "" - full: "" + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false + snapshotController: + install: false # auth module configuration 
auth: overrides: diff --git a/docs/network-policies/README.md b/docs/network-policies/README.md new file mode 100644 index 000000000..5511fbb1e --- /dev/null +++ b/docs/network-policies/README.md @@ -0,0 +1,24 @@ +# Network Policies Documentation + +This documentation describes all Network Policies of the KFD components for the OnPremises schema. + +## Modules +- [Auth](modules/auth/README.md) - Pomerium SSO +- [Ingress](modules/ingress/README.md) - Nginx (single/dual) + Cert-manager +- [Logging](modules/logging/README.md) - OpenSearch/Loki +- [Monitoring](modules/monitoring/README.md) - Prometheus/Mimir +- [OPA](modules/opa/README.md) - Gatekeeper/Kyverno +- [Tracing](modules/tracing/README.md) - Tempo + +## Common Patterns +All namespaces include: +- Default deny-all policy +- DNS access to kube-dns +- Prometheus metrics collection +- Kubernetes API server access where needed + +## High Level Overview +- [Overview](overview.md) + +## Instructions +Generate the new Network Policies diagrams with `make generate-np-diagrams`. \ No newline at end of file diff --git a/docs/network-policies/modules/auth/README.md b/docs/network-policies/modules/auth/README.md new file mode 100644 index 000000000..488d84b6b --- /dev/null +++ b/docs/network-policies/modules/auth/README.md @@ -0,0 +1,16 @@ +# Auth Module Network Policies + +## Components +- Pomerium + +## Namespaces +- pomerium + +## Network Policies List +- deny-all +- all-egress-kube-dns +- pomerium-ingress-nginx +- pomerium-egress-all + +## Configurations +- [SSO with Pomerium](sso.md) diff --git a/docs/network-policies/modules/auth/sso.md b/docs/network-policies/modules/auth/sso.md new file mode 100644 index 000000000..5d6b816f1 --- /dev/null +++ b/docs/network-policies/modules/auth/sso.md @@ -0,0 +1,53 @@ +# SSO with Pomerium + +```mermaid +graph TD + %% Namespaces + subgraph ingress-nginx + nginx[Nginx Controller] + end + + subgraph pomerium + pom[Pomerium
app: pomerium]
+        acme[ACME HTTP Solver<br/>app: cert-manager]
+    end
+
+    subgraph monitoring
+        graf[Grafana]
+        prom[Prometheus]
+        am[Alertmanager]
+        minio_monitoring[MinIO]
+    end
+
+    subgraph logging
+        osd[OpenSearch Dashboards]
+        minio_logging[MinIO]
+    end
+
+    subgraph tracing
+        minio_tracing[MinIO]
+    end
+
+    subgraph gatekeeper-system
+        gpm[Gatekeeper Policy Manager]
+    end
+
+    %% External and K8s Core Components
+    dns[Kube DNS]
+    ext[External]
+
+    %% Edges
+    pom -->|"53/UDP"| dns
+    nginx -->|"8080/TCP"| pom
+    nginx -->|"8089/TCP"| acme
+    prom -->|"9090/TCP metrics"| pom
+    pom -->|"443/TCP"| ext
+    pom -->|"3000/TCP"| graf
+    pom -->|"9090/TCP"| prom
+    pom -->|"9093/TCP"| am
+    pom -->|"5601/TCP"| osd
+    pom -->|"9001/TCP"| minio_logging
+    pom -->|"9001/TCP"| minio_tracing
+    pom -->|"9001/TCP"| minio_monitoring
+    pom -->|"8080/TCP"| gpm
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/ingress/README.md b/docs/network-policies/modules/ingress/README.md
new file mode 100644
index 000000000..23eb467df
--- /dev/null
+++ b/docs/network-policies/modules/ingress/README.md
@@ -0,0 +1,35 @@
+# Ingress Module Network Policies
+
+## Components
+- Nginx Ingress Controller (single/dual mode)
+- Cert-manager
+- Forecastle
+
+## Namespaces
+- ingress-nginx
+- cert-manager
+
+## Network Policies List
+
+### Cert-manager
+- deny-all
+- all-egress-kube-dns
+- cert-manager-egress-kube-apiserver
+- cert-manager-webhook-ingress-kube-apiserver
+- cert-manager-egress-https
+- cert-manager-ingress-prometheus-metrics
+- acme-http-solver-ingress-lets-encrypt
+
+### Ingress-nginx
+- deny-all
+- all-egress-kube-dns
+- forecastle-ingress-nginx
+- forecastle-egress-kube-apiserver
+- nginx-egress-all
+- all-ingress-nginx
+- nginx-ingress-prometheus-metric
+- external-dns
+
+## Configurations
+- [Single Nginx](single.md)
+- [Dual Nginx](dual.md)
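Both namespaces above follow the common pattern called out in the network policies overview: every namespace gets a default deny-all policy plus explicit DNS egress. As a rough sketch of that pattern (illustrative only — the actual manifests are rendered from the templates under `templates/distribution/manifests/ingress/policies/`, and the `kube-system`/`k8s-app: kube-dns` selectors are assumptions based on a standard Kubernetes DNS setup):

```yaml
# Illustrative sketch of the deny-all + kube-dns egress pattern.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all
  namespace: ingress-nginx # the same pattern applies to every KFD namespace
spec:
  podSelector: {} # selects all pods in the namespace
  policyTypes:
    - Ingress
    - Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: all-egress-kube-dns
  namespace: ingress-nginx
spec:
  podSelector: {}
  policyTypes:
    - Egress
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system # assumption
          podSelector:
            matchLabels:
              k8s-app: kube-dns # assumption: default kube-dns/CoreDNS label
      ports:
        - port: 53
          protocol: UDP
        - port: 53
          protocol: TCP
```

Because NetworkPolicies are additive, the allow rules listed above and drawn in the diagrams punch holes in this deny-all baseline.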
diff --git a/docs/network-policies/modules/ingress/dual.md b/docs/network-policies/modules/ingress/dual.md
new file mode 100644
index 000000000..6b988d41d
--- /dev/null
+++ b/docs/network-policies/modules/ingress/dual.md
@@ -0,0 +1,33 @@
+# Dual Nginx Configuration
+
+```mermaid
+graph TD
+    %% Namespaces
+    subgraph ingress-nginx
+        nginx[Nginx Controller<br/>app: ingress]
+        fc[Forecastle<br/>app: forecastle]
+    end
+
+    subgraph cert-manager
+        cm[Cert Manager<br/>app: cert-manager]
+        cmw[Cert Manager Webhook]
+    end
+
+    %% External and K8s Core Components
+    dns[Kube DNS]
+    api[Kubernetes API]
+    prom[Prometheus]
+    ext[External ACME / Internet]
+
+    %% Edges
+    nginx & cm -->|"53/UDP"| dns
+    cm -->|"6443/TCP"| api
+    fc -->|"6443/TCP"| api
+    api -->|"10250/TCP"| cmw
+    prom -->|"10254/TCP"| nginx
+    prom -->|"9402/TCP"| cm
+    cm -->|"443,80/TCP"| ext
+    all[All Namespaces] -->|"8080,8443,9443/TCP"| nginx
+    nginx -->|"egress: all"| all
+    nginx -->|"3000/TCP"| fc
+```
\ No newline at end of file
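The `cm -->|"443,80/TCP"| ext` edge in the diagram corresponds to the `cert-manager-egress-https` policy from the list above, which lets cert-manager reach external ACME endpoints such as Let's Encrypt. A hedged sketch of what such a policy could look like, reusing the `app: cert-manager` label from the diagram (the rendered template is `templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl`; the exact selectors there may differ):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: cert-manager-egress-https
  namespace: cert-manager
spec:
  podSelector:
    matchLabels:
      app: cert-manager # label shown in the diagram
  policyTypes:
    - Egress
  egress:
    # No "to" selector: allow these ports towards any destination,
    # since ACME servers live outside the cluster.
    - ports:
        - port: 443
          protocol: TCP
        - port: 80
          protocol: TCP
```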
diff --git a/docs/network-policies/modules/ingress/single.md b/docs/network-policies/modules/ingress/single.md
new file mode 100644
index 000000000..b0f7b2054
--- /dev/null
+++ b/docs/network-policies/modules/ingress/single.md
@@ -0,0 +1,35 @@
+# Single Nginx Configuration
+
+```mermaid
+graph TD
+    %% Namespaces
+    subgraph ingress-nginx
+        nginx[Nginx Controller<br/>app: ingress-nginx]
+        fc[Forecastle<br/>app: forecastle]
+        edns[ExternalDNS<br/>app: external-dns]
+    end
+
+    subgraph cert-manager
+        cm[Cert Manager<br/>app: cert-manager]
+        cmw[Cert Manager Webhook]
+    end
+
+    %% External and K8s Core Components
+    dns[Kube DNS]
+    api[Kubernetes API]
+    prom[Prometheus]
+    ext[External / ACME]
+
+    %% Edges
+    nginx & cm -->|"53/UDP"| dns
+    cm -->|"6443/TCP"| api
+    fc -->|"6443/TCP"| api
+    api -->|"10250/TCP"| cmw
+    prom -->|"10254/TCP"| nginx
+    prom -->|"9402/TCP"| cm
+    cm -->|"443,80/TCP"| ext
+    all[All Namespaces] -->|"8080,8443,9443/TCP"| nginx
+    nginx -->|"egress: all"| all
+    nginx -->|"3000/TCP"| fc
+    edns --> |"egress: all"| ext
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/logging/README.md b/docs/network-policies/modules/logging/README.md
new file mode 100644
index 000000000..df072997e
--- /dev/null
+++ b/docs/network-policies/modules/logging/README.md
@@ -0,0 +1,60 @@
+# Logging Module Network Policies
+
+## Components
+
+- OpenSearch Stack
+- Loki Stack
+
+## Namespaces
+
+- logging
+
+## Network Policies List
+
+### Common Policies
+
+- deny-all
+- all-egress-kube-dns
+- event-tailer-egress-kube-apiserver
+- fluentd-egress-all
+- fluentbit-egress-fluentd
+- fluentbit-egress-kube-apiserver
+- fluentbit-ingress-prometheus-metrics
+- logging-operator-egress-kube-apiserver
+
+### OpenSearch Stack
+
+- fluentd-ingress-fluentbit
+- fluentd-ingress-prometheus-metrics
+- opensearch-discovery
+- opensearch-ingress-dashboards
+- opensearch-ingress-fluentd
+- opensearch-ingress-prometheus-metrics
+- opensearch-ingress-jobs
+- opensearch-dashboards-egress-opensearch
+- opensearch-dashboards-ingress-nginx
+- opensearch-dashboards-ingress-jobs
+- jobs-egress-opensearch
+- jobs-egress-opensearch-dashboards
+
+### Loki Stack
+
+- loki-distributed-ingress-fluentd
+- loki-distributed-ingress-grafana
+- loki-distributed-ingress-prometheus-metrics
+- loki-distributed-discovery
+- loki-distributed-egress-all
+
+### MinIO
+
+- minio-ingress-namespace
+- minio-buckets-setup-egress-kube-apiserver
+- minio-buckets-setup-egress-minio
+- minio-ingress-prometheus-metrics
+- minio-ingress-nginx
+- minio-egress-https
+
+## Configurations
+
+- [OpenSearch Stack](opensearch.md)
+- [Loki Stack](loki.md)
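To tie the lists above to the diagrams that follow: the `fluentbit-egress-fluentd` entry matches the `fb -->|"24240/TCP"| fd` edge, i.e. Fluent Bit is only allowed to ship logs to Fluentd on the forwarder port. A sketch under the assumption that pods carry the `app.kubernetes.io/name` labels shown in the diagrams (the rendered template lives in `templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl`):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: fluentbit-egress-fluentd
  namespace: logging
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: fluentbit
  policyTypes:
    - Egress
  egress:
    - to:
        - podSelector: # same-namespace peer: the Fluentd aggregator
            matchLabels:
              app.kubernetes.io/name: fluentd
      ports:
        - port: 24240 # Fluentd forward input port, per the diagrams
          protocol: TCP
```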
app.kubernetes.io/name: fluentbit] + fd[Fluentd
app.kubernetes.io/name: fluentd] + loki_gateway[Loki Gateway
app.kubernetes.io/component: gateway] + loki_compactor[Loki Compactor
app.kubernetes.io/component: compactor] + loki_distributor[Loki Distributor
app.kubernetes.io/component: distributor] + loki_ingester[Loki Ingester
app.kubernetes.io/component: ingester] + loki_querier[Loki Querier
app.kubernetes.io/component: querier] + loki_query_frontend[Loki Query Frontend
app.kubernetes.io/component: query-frontend] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-logging-buckets-setup] + end + + subgraph monitoring + prom[Prometheus] + graf[Grafana] + end + + pom[Pomerium] + + %% External and K8s Core Components + api[Kubernetes API] + ext[External] + dns[Kube DNS] + + %% Edges + logging -->|"53/UDP"| dns + bucket -->|"6443/TCP"| api + fb -->|"24240/TCP"| fd + fd -->|"8080/TCP"| loki_gateway + prom -->|"3100/TCP"| loki_gateway + graf -->|"8080/TCP"| loki_gateway + prom -->|"2020/TCP"| fb + fb -->|"6443/TCP"| api + loki_query_frontend -->|"loki-discovery
9095,7946,3100/TCP"| loki_distributor + loki_distributor -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester + loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester + loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_query_frontend + loki_compactor -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester + loki_compactor -->|"egress: all"| minio + loki_ingester -->|"egress: all"| minio + loki_querier -->|"egress: all"| minio + bucket -->|"9000/TCP"| minio + minio -->|"443/TCP"| ext + pom -->|"9001/TCP"| minio + minio -->|"9000/TCP"| logging +``` \ No newline at end of file diff --git a/docs/network-policies/modules/logging/opensearch.md b/docs/network-policies/modules/logging/opensearch.md new file mode 100644 index 000000000..5cf5727eb --- /dev/null +++ b/docs/network-policies/modules/logging/opensearch.md @@ -0,0 +1,48 @@ +# Opensearch Stack Configuration + +```mermaid +graph TD + %% Namespace + subgraph logging + fb[Fluentbit
app.kubernetes.io/name: fluentbit] + fd[Fluentd
app.kubernetes.io/name: fluentd] + os[OpenSearch
app.kubernetes.io/name: opensearch] + osd[OpenSearch Dashboards
app: opensearch-dashboards] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-logging-buckets-setup] + op[Logging Operator
app.kubernetes.io/name: logging-operator] + et[Event Tailer
app.kubernetes.io/name: event-tailer]
+        job[OpenSearch Jobs]
+    end
+
+    %% External and K8s Core Components
+    api[Kubernetes API]
+    ext[External]
+    prom[Prometheus]
+    pom[Pomerium]
+    nginx[Nginx]
+    dns[Kube DNS]
+
+    %% Edges
+    logging --->|"53/UDP,TCP"| dns
+    fb -->|"6443/TCP"| api
+    et -->|"6443/TCP"| api
+    op -->|"6443/TCP"| api
+    bucket -->|"6443/TCP"| api
+    fb -->|"24240/TCP"| fd
+    fd -->|"egress: all"| os
+    osd -->|"9200/TCP"| os
+    pom -->|"5601/TCP"| osd
+    job -->|"5601/TCP"| osd
+    job -->|"9200/TCP"| os
+    prom -->|"2020/TCP"| fb
+    prom -->|"24231/TCP"| fd
+    prom -->|"9108/TCP"| os
+    prom -->|"9000/TCP"| minio
+    bucket -->|"9000/TCP"| minio
+    minio -->|"443/TCP"| ext
+    pom -->|"9001/TCP"| minio
+    logging -->|"9000/TCP"| minio
+    nginx -->|"9001/TCP"| minio
+    nginx -->|"5601/TCP"| osd
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/monitoring/README.md b/docs/network-policies/modules/monitoring/README.md
new file mode 100644
index 000000000..acf6e419e
--- /dev/null
+++ b/docs/network-policies/modules/monitoring/README.md
@@ -0,0 +1,54 @@
+# Monitoring Module Network Policies
+
+## Components
+- Prometheus Stack
+- Mimir Stack
+
+## Namespaces
+- monitoring
+
+## Network Policies List
+
+### Common Policies
+- deny-all
+- all-egress-kube-dns
+- alertmanager-main
+- alertmanager-ingress-nginx
+- blackbox-exporter
+- grafana
+- grafana-egress-tempo-gateway
+- grafana-ingress-nginx
+- kube-state-metrics
+- node-exporter
+- prometheus-ingress-nginx
+- prometheus-adapter
+- prometheus-ingress-prometheus-adapter
+- prometheus-operator
+- x509-exporter-egress-kube-apiserver
+- x509-exporter-ingress-prometheus-metrics
+
+### MinIO
+- minio-ingress-namespace
+- minio-buckets-setup-egress-kube-apiserver
+- minio-buckets-setup-egress-minio
+- minio-ingress-prometheus-metrics
+- minio-monitoring-egress-all
+
+### Prometheus specific
+- prometheus-k8s
+- prometheus-egress-minio
+- prometheus-egress-kube-apiserver
+
+### Mimir specific
+- mimir-distributed-discovery
+- mimir-distributed-ingress-prometheus-metrics
+- mimir-gateway-ingress-grafana
+- mimir-querier-egress-https
+- mimir-ingester-egress-https
+- mimir-distributed-egress-minio (when using MinIO)
+- mimir-distributed-egress-all (when not using MinIO)
+
+## Configurations
+- [Prometheus Stack](prometheus.md)
+- [Mimir Stack](mimir.md)
+
diff --git a/docs/network-policies/modules/monitoring/mimir.md b/docs/network-policies/modules/monitoring/mimir.md
new file mode 100644
index 000000000..5619dbfe4
--- /dev/null
+++ b/docs/network-policies/modules/monitoring/mimir.md
@@ -0,0 +1,53 @@
+# Mimir Stack Configuration
+
+```mermaid
+graph TD
+    %% Namespace
+    subgraph monitoring
+        gateway[Mimir Gateway
app.kubernetes.io/component: gateway] + distributor[Mimir Distributor
app.kubernetes.io/component: distributor] + ingester[Mimir Ingester
app.kubernetes.io/component: ingester] + querier[Mimir Querier
app.kubernetes.io/component: querier] + qfront[Mimir Query Frontend
app.kubernetes.io/component: query-frontend] + qsched[Mimir Query Scheduler
app.kubernetes.io/component: query-scheduler] + store[Mimir Store Gateway
app.kubernetes.io/component: store-gateway] + compactor[Mimir Compactor
app.kubernetes.io/component: compactor] + grafana[Grafana
app.kubernetes.io/name: grafana] + prom[Prometheus
app.kubernetes.io/name: prometheus] + am[Alertmanager
app.kubernetes.io/component: alert-router] + bb[Blackbox Exporter
app.kubernetes.io/name: blackbox-exporter] + ksm[Kube State Metrics
app.kubernetes.io/name: kube-state-metrics] + ne[Node Exporter
app.kubernetes.io/name: node-exporter] + x509[x509 Exporter
app: x509-certificate-exporter] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-monitoring-buckets-setup] + end + + %% External and K8s Core Components + api[Kubernetes API] + dns[Kube DNS] + + %% Edges + monitoring -->|"53/UDP,TCP"| dns + bucket -->|"9000/TCP"| minio + qfront -->|"mimir-discovery
9095,7946,8080/TCP"| qsched + qfront -->|"mimir-discovery
9095,7946,8080/TCP"| querier + gateway -->|"mimir-discovery
9095,7946,8080/TCP"| distributor + distributor -->|"mimir-discovery
9095,7946,8080/TCP"| ingester + qsched -->|"mimir-discovery
9095,7946,8080/TCP"| querier + querier -->|"mimir-discovery
9095,7946,8080/TCP"| store + querier -->|"mimir-discovery
9095,7946,8080/TCP"| ingester + store -->|"mimir-discovery
9095,7946,8080/TCP"| compactor + compactor -->|"mimir-discovery
9095,7946,8080/TCP"| store + ingester & store & compactor -->|"9000/TCP"| minio + grafana -->|"8080/TCP"| gateway + prom -->|"8080/TCP"| distributor + prom -->|"9115,19115/TCP"| bb + prom -->|"8443,9443/TCP"| ksm + prom -->|"9100/TCP"| ne + prom -->|"9793/TCP"| x509 + prom -->|"9093,8080/TCP"| am + pom[Pomerium] -->|"3000/TCP"| grafana + pom -->|"9093/TCP"| am + x509 -->|"6443/TCP"| api +``` \ No newline at end of file diff --git a/docs/network-policies/modules/monitoring/prometheus.md b/docs/network-policies/modules/monitoring/prometheus.md new file mode 100644 index 000000000..f05457035 --- /dev/null +++ b/docs/network-policies/modules/monitoring/prometheus.md @@ -0,0 +1,43 @@ +# Prometheus Stack Configuration + +```mermaid +graph TD + %% Namespace + subgraph monitoring + prom[Prometheus
app.kubernetes.io/name: prometheus] + grafana[Grafana
app.kubernetes.io/name: grafana] + am[Alertmanager
app.kubernetes.io/name: alertmanager] + bb[Blackbox Exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/component: exporter] + ksm[Kube State Metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/component: exporter] + ne[Node Exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/component: exporter] + pa[Prometheus Adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/component: metrics-adapter] + po[Prometheus Operator
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/component: controller] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-monitoring-buckets-setup] + x509[x509 Exporter
app: x509-certificate-exporter] + end + + %% External and K8s Core Components + api[Kubernetes API] + dns[Kube DNS] + pom["Pomerium"] + + %% Edges + monitoring -->|"53/UDP,TCP"| dns + bucket -->|"9000/TCP"| minio + prom -->|"6443,8405/TCP"| api + prom -->|"9000/TCP"| minio + prom -->|"9115,19115/TCP"| bb + prom -->|"8443,9443/TCP"| ksm + prom -->|"9100/TCP"| ne + prom -->|"8443/TCP"| po + prom -->|"9793/TCP"| x509 + prom & am & bb & grafana & ksm & ne & pa & po -->|"egress: all"| all[All Namespaces] + pa -->|"9090/TCP"| prom + grafana -->|"9090/TCP"| prom + prom -->|"9093,8080/TCP"| am + pom -->|"9093/TCP"| am + prom -->|"3000/TCP"| grafana + pom -->|"3000/TCP"| grafana + x509 -->|"6443/TCP"| api +``` diff --git a/docs/network-policies/modules/opa/README.md b/docs/network-policies/modules/opa/README.md new file mode 100644 index 000000000..a43db5d2a --- /dev/null +++ b/docs/network-policies/modules/opa/README.md @@ -0,0 +1,36 @@ +# OPA Module Network Policies + +## Components +- Gatekeeper + Gatekeeper Policy Manager +- Kyverno + +## Namespaces +- gatekeeper-system (when using Gatekeeper) +- kyverno (when using Kyverno) + +## Network Policies List + +### Gatekeeper +- deny-all +- all-egress-dns +- audit-controller-egress-kube-apiserver +- controller-manager-egress-kube-apiserver +- controller-manager-ingress-kube-apiserver +- gpm-egress-kube-apiserver +- gpm-ingress-pomerium +- gatekeeper-ingress-prometheus-metrics + +### Kyverno +- deny-all +- all-egress-dns +- kyverno-admission-egress-kube-apiserver +- kyverno-admission-ingress-nodes +- kyverno-background-egress-kube-apiserver +- kyverno-reports-egress-kube-apiserver +- kyverno-cleanup-egress-kube-apiserver +- kyverno-cleanup-reports-egress-kube-apiserver + +## Configurations +- [Gatekeeper](gatekeeper.md) +- [Kyverno](kyverno.md) + diff --git a/docs/network-policies/modules/opa/gatekeeper.md b/docs/network-policies/modules/opa/gatekeeper.md new file mode 100644 index 000000000..d3d049f7d --- /dev/null +++ b/docs/network-policies/modules/opa/gatekeeper.md @@ -0,0 +1,26 @@ +# Gatekeeper Configuration + +```mermaid +graph TD + %% Namespace + subgraph gatekeeper-system + audit[Audit Controller
control-plane: audit-controller] + cm[Controller Manager
control-plane: controller-manager] + gpm[Policy Manager
app: gatekeeper-policy-manager] + end + + %% External and K8s Core Components + api[Kubernetes API] + dns[Kube DNS] + prom[Prometheus] + pom[Pomerium] + + %% Edges + audit & cm -->|"53/UDP"| dns + audit -->|"6443/TCP"| api + cm -->|"6443/TCP"| api + gpm -->|"6443/TCP"| api + pom -->|"8080/TCP"| gpm + prom -->|"8888/TCP"| audit & cm + api -->|"8443,443/TCP"| cm +``` diff --git a/docs/network-policies/modules/opa/kyverno.md b/docs/network-policies/modules/opa/kyverno.md new file mode 100644 index 000000000..602c40dc8 --- /dev/null +++ b/docs/network-policies/modules/opa/kyverno.md @@ -0,0 +1,27 @@ +# Kyverno Configuration + +```mermaid +graph TD + %% Namespace + subgraph kyverno + admission[Admission Controller
component: admission-controller] + background[Background Controller
component: background-controller] + reports[Reports Controller
component: reports-controller] + cleanup[Cleanup Controller
component: cleanup-controller] + end + + %% External and K8s Core Components + dns[Kube DNS] + api[Kubernetes API] + + %% Edges + admission -->|"53/UDP"| dns + background -->|"53/UDP"| dns + reports -->|"53/UDP"| dns + cleanup -->|"53/UDP"| dns + admission -->|"6443/TCP"| api + background -->|"6443/TCP"| api + reports -->|"6443/TCP"| api + cleanup -->|"6443/TCP"| api + all[All Namespaces] -->|"9443/TCP"| admission +``` diff --git a/docs/network-policies/modules/tracing/README.md b/docs/network-policies/modules/tracing/README.md new file mode 100644 index 000000000..4db75de3b --- /dev/null +++ b/docs/network-policies/modules/tracing/README.md @@ -0,0 +1,32 @@ +# Tracing Module Network Policies + +## Components +- Tempo + +## Namespaces +- tracing + +## Network Policies List +- deny-all +- all-egress-kube-dns +- tempo-distributed-discovery +- tempo-distributed-ingress-prometheus-metrics +- tempo-gateway-ingress-grafana +- all-egress-tempo-distributor +- tempo-distributor-ingress-traces +- tempo-components-egress-memcached +- memcached-ingress-querier +- tempo-components-egress-https +- tempo-distributed-egress-minio (when using MinIO) +- tempo-distributed-egress-all (when not using MinIO) + +### MinIO +- minio-ingress-namespace +- minio-buckets-setup-egress-kube-apiserver +- minio-buckets-setup-egress-minio +- minio-ingress-prometheus-metrics +- minio-ingress-pomerium +- minio-egress-https + +## Configurations +- [Tempo](tempo.md) diff --git a/docs/network-policies/modules/tracing/tempo.md b/docs/network-policies/modules/tracing/tempo.md new file mode 100644 index 000000000..0ca46d1ba --- /dev/null +++ b/docs/network-policies/modules/tracing/tempo.md @@ -0,0 +1,42 @@ +# Tempo Configuration + +```mermaid +graph TD + %% Namespaces + subgraph tracing + gateway[Tempo Gateway
component: gateway] + dist[Tempo Distributor
component: distributor] + query[Tempo Querier
component: querier] + mem[Memcached
component: memcached] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-tracing-buckets-setup] + end + + subgraph monitoring + graf[Grafana] + prom[Prometheus] + end + + subgraph pomerium + pom[Pomerium] + end + + allns[All Namespaces] + + %% External and K8s Core Components + dns[Kube DNS] + ext[External] + + %% Edges + gateway & dist & query -->|"53/UDP"| dns + gateway -->|"9095,7946,3100/TCP"| dist & query + dist -->|"9095,7946,3100/TCP"| query + query -->|"11211/TCP"| mem + allns -->|"4317/TCP"| dist + graf -->|"8080/TCP"| gateway + prom -->|"3100/TCP"| gateway & dist & query + pom -->|"9001/TCP"| minio + query -->|"9000/TCP"| minio + minio -->|"443/TCP"| ext + bucket -->|"9000/TCP"| minio +``` \ No newline at end of file diff --git a/docs/network-policies/overview.md b/docs/network-policies/overview.md new file mode 100644 index 000000000..93fbc5033 --- /dev/null +++ b/docs/network-policies/overview.md @@ -0,0 +1,36 @@ +# KFD Network Policies Overview + +```mermaid +graph TD + subgraph kfd[KFD Core Modules] + ingress[Ingress
Nginx + Cert-manager] + auth[Auth
Pomerium] + mon[Monitoring
Prometheus/Mimir] + log[Logging
OpenSearch/Loki]
+        tracing[Tracing
Tempo] + opa[OPA
Gatekeeper/Kyverno] + end + + %% K8s Core Components + dns[KubeDNS] + api[Kubernetes API] + ext[External] + + %% Edges + kfd --->|"53/UDP"| dns + kfd -->|"6443/TCP"| api + ingress -->|"8080/TCP"| auth + auth -->|"auth proxy"| mon & log & tracing & opa + auth -->|"443/TCP"| ext + mon -->|"metrics"| all + mon -->|"metrics"| auth + mon -->|"metrics"| ingress + mon -->|"metrics"| log + mon -->|"metrics"| tracing + mon -->|"metrics"| opa + all[All Namespaces] -->|"logs"| log + all -->|"traces"| tracing + + + +``` \ No newline at end of file diff --git a/docs/releases/v1.1.0.md b/docs/releases/legacy/v1.1.0.md similarity index 100% rename from docs/releases/v1.1.0.md rename to docs/releases/legacy/v1.1.0.md diff --git a/docs/releases/v1.2.0.md b/docs/releases/legacy/v1.2.0.md similarity index 100% rename from docs/releases/v1.2.0.md rename to docs/releases/legacy/v1.2.0.md diff --git a/docs/releases/v1.3.0.md b/docs/releases/legacy/v1.3.0.md similarity index 100% rename from docs/releases/v1.3.0.md rename to docs/releases/legacy/v1.3.0.md diff --git a/docs/releases/v1.4.0.md b/docs/releases/legacy/v1.4.0.md similarity index 100% rename from docs/releases/v1.4.0.md rename to docs/releases/legacy/v1.4.0.md diff --git a/docs/releases/v1.5.0.md b/docs/releases/legacy/v1.5.0.md similarity index 100% rename from docs/releases/v1.5.0.md rename to docs/releases/legacy/v1.5.0.md diff --git a/docs/releases/v1.5.1.md b/docs/releases/legacy/v1.5.1.md similarity index 100% rename from docs/releases/v1.5.1.md rename to docs/releases/legacy/v1.5.1.md diff --git a/docs/releases/v1.6.0.md b/docs/releases/legacy/v1.6.0.md similarity index 100% rename from docs/releases/v1.6.0.md rename to docs/releases/legacy/v1.6.0.md diff --git a/docs/releases/v1.7.0.md b/docs/releases/legacy/v1.7.0.md similarity index 100% rename from docs/releases/v1.7.0.md rename to docs/releases/legacy/v1.7.0.md diff --git a/docs/releases/v1.7.1.md b/docs/releases/legacy/v1.7.1.md similarity index 100% rename from docs/releases/v1.7.1.md rename to docs/releases/legacy/v1.7.1.md diff --git a/docs/releases/v1.25.10.md b/docs/releases/v1.25.10.md index 150604eaa..d57a0ba13 100644 --- a/docs/releases/v1.25.10.md +++ b/docs/releases/v1.25.10.md @@ -32,7 +32,7 @@ This is a maintenance release enabling new features in furyctl automations. Chan ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. ## Furyctl compatibility diff --git a/docs/releases/v1.25.8.md b/docs/releases/v1.25.8.md index 7c0cf4a19..49ab942ef 100644 --- a/docs/releases/v1.25.8.md +++ b/docs/releases/v1.25.8.md @@ -32,4 +32,4 @@ The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.i ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.26.0.md b/docs/releases/v1.26.0.md index 39a0c52b6..30427a814 100644 --- a/docs/releases/v1.26.0.md +++ b/docs/releases/v1.26.0.md @@ -47,4 +47,4 @@ This release adds compatibility with Kubernetes 1.26. ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. 
+Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.26.1.md b/docs/releases/v1.26.1.md index 46cf965cc..72eb0e614 100644 --- a/docs/releases/v1.26.1.md +++ b/docs/releases/v1.26.1.md @@ -12,4 +12,4 @@ With this release, a single hotfix has been added ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.26.2.md b/docs/releases/v1.26.2.md index eed2795ba..c29f3acbb 100644 --- a/docs/releases/v1.26.2.md +++ b/docs/releases/v1.26.2.md @@ -17,4 +17,4 @@ With the `OnPremise` provider, you can effortlessly install preconfigured Kubern ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.26.3.md b/docs/releases/v1.26.3.md index 8463ec351..056b30670 100644 --- a/docs/releases/v1.26.3.md +++ b/docs/releases/v1.26.3.md @@ -11,4 +11,4 @@ The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.i ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.26.5.md b/docs/releases/v1.26.5.md index c7cfe7968..1a51e991f 100644 --- a/docs/releases/v1.26.5.md +++ b/docs/releases/v1.26.5.md @@ -34,7 +34,7 @@ This is a maintenance release enabling new features in furyctl automations. Chan ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. ## Furyctl compatibility diff --git a/docs/releases/v1.26.6.md b/docs/releases/v1.26.6.md index b85bbeca2..6a2b2b3f0 100644 --- a/docs/releases/v1.26.6.md +++ b/docs/releases/v1.26.6.md @@ -105,4 +105,4 @@ This release add the following features: ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.27.0.md b/docs/releases/v1.27.0.md index bc63273b7..8fc97f189 100644 --- a/docs/releases/v1.27.0.md +++ b/docs/releases/v1.27.0.md @@ -53,4 +53,4 @@ This release add the following features: ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. 
diff --git a/docs/releases/v1.27.1.md b/docs/releases/v1.27.1.md index 8606dac65..fccabcee3 100644 --- a/docs/releases/v1.27.1.md +++ b/docs/releases/v1.27.1.md @@ -10,4 +10,4 @@ The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.i ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.27.2.md b/docs/releases/v1.27.2.md index 16e375e87..5b91d2a3d 100644 --- a/docs/releases/v1.27.2.md +++ b/docs/releases/v1.27.2.md @@ -30,4 +30,4 @@ This is a maintenance release enabling new features in furyctl automations. Chan ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.27.3.md b/docs/releases/v1.27.3.md index 72e7a244d..d057746b7 100644 --- a/docs/releases/v1.27.3.md +++ b/docs/releases/v1.27.3.md @@ -15,7 +15,7 @@ This is a maintenance release with focus on improving the overall stability of t ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. ## Furyctl compatibility diff --git a/docs/releases/v1.27.4.md b/docs/releases/v1.27.4.md index 18c6f4699..c1f6870c2 100644 --- a/docs/releases/v1.27.4.md +++ b/docs/releases/v1.27.4.md @@ -12,7 +12,7 @@ This is a maintenance release with focus on improving the overall stability of t ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. ## Furyctl compatibility diff --git a/docs/releases/v1.27.5.md b/docs/releases/v1.27.5.md index e54dda6f1..873da2743 100644 --- a/docs/releases/v1.27.5.md +++ b/docs/releases/v1.27.5.md @@ -105,4 +105,4 @@ This release add the following features: ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.27.6.md b/docs/releases/v1.27.6.md index 877efb6e4..520c25421 100644 --- a/docs/releases/v1.27.6.md +++ b/docs/releases/v1.27.6.md @@ -60,4 +60,4 @@ spec: ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. diff --git a/docs/releases/v1.27.7.md b/docs/releases/v1.27.7.md index 6272ecaee..80a2a494f 100644 --- a/docs/releases/v1.27.7.md +++ b/docs/releases/v1.27.7.md @@ -101,4 +101,4 @@ The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.i ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure. 
+Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure.
diff --git a/docs/releases/v1.27.8.md b/docs/releases/v1.27.8.md
index 5b0aed644..6821a774d 100644
--- a/docs/releases/v1.27.8.md
+++ b/docs/releases/v1.27.8.md
@@ -16,7 +16,17 @@ No changes

## New features 🌟

-No new features.
+- **AUTH configurable expiration**: Dex can now be configured with custom expiration times for ID tokens and signing keys. An example configuration:
+
+  ```yaml
+  ...
+  auth:
+    dex:
+      expiry:
+        signingKeys: "6h"
+        idTokens: "24h"
+  ...
+  ```

## Fixes 🐞

@@ -24,4 +34,4 @@

## Upgrade procedure

-Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure.
+Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure.
diff --git a/docs/releases/v1.28.0.md b/docs/releases/v1.28.0.md
index 4ec17a424..e84ec47b0 100644
--- a/docs/releases/v1.28.0.md
+++ b/docs/releases/v1.28.0.md
@@ -107,4 +107,4 @@ This release add the following features:

## Upgrade procedure

-Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure.
+Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure.
diff --git a/docs/releases/v1.28.1.md b/docs/releases/v1.28.1.md
index 1563d24fb..26e2c2ee6 100644
--- a/docs/releases/v1.28.1.md
+++ b/docs/releases/v1.28.1.md
@@ -60,4 +60,4 @@ spec:

## Upgrade procedure

-Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure.
+Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure.
diff --git a/docs/releases/v1.28.2.md b/docs/releases/v1.28.2.md
index a920839f7..16be7c49c 100644
--- a/docs/releases/v1.28.2.md
+++ b/docs/releases/v1.28.2.md
@@ -101,4 +101,4 @@ The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.i

## Upgrade procedure

-Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure.
+Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure.
diff --git a/docs/releases/v1.28.3.md b/docs/releases/v1.28.3.md
index 3ee630c69..69317bf27 100644
--- a/docs/releases/v1.28.3.md
+++ b/docs/releases/v1.28.3.md
@@ -34,4 +34,4 @@ No changes

## Upgrade procedure

-Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd) for the detailed procedure.
+Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure.
diff --git a/docs/releases/v1.28.5.md b/docs/releases/v1.28.5.md
new file mode 100644
index 000000000..34981617e
--- /dev/null
+++ b/docs/releases/v1.28.5.md
@@ -0,0 +1,206 @@
+# Kubernetes Fury Distribution Release v1.28.5
+
+Welcome to KFD release `v1.28.5`. This patch release also updates Kubernetes to 1.28.15 on the OnPremises provider.
+
+The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.io/).
+
+## New Features since `v1.28.4`
+
+### Installer Updates
+
+- [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) 📦 installer: [**v1.30.6**](https://github.com/sighupio/fury-kubernetes-on-premises/releases/tag/v1.30.6)
+  - Updated etcd default version to 3.5.15
+  - Updated HAProxy version to 3.0 LTS
+  - Updated containerd default version to 1.7.23
+  - Added support for Kubernetes versions 1.30.6, 1.29.10 and 1.28.15
+- [eks](https://github.com/sighupio/fury-eks-installer) 📦 installer: [**v3.2.0**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.2.0)
+  - Introduced AMI selection type: `alinux2023` and `alinux2`
+  - Fixed eks-managed nodepool node labels
+
+### Module updates
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) 📦 core module: [**v2.0.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v2.0.0)
+  - Updated Tigera operator to v1.36.1 (which includes Calico v3.29.0)
+  - Updated Cilium to v1.16.3
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) 📦 core module: [**v3.3.0**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v3.3.0)
+  - Updated blackbox-exporter to v0.25.0
+  - Updated grafana to v11.3.0
+  - Updated kube-rbac-proxy to v0.18.1
+  - Updated kube-state-metrics to v2.13.0
+  - Updated node-exporter to v1.8.2
+  - Updated prometheus-adapter to v0.12.0
+  - Updated prometheus-operator to v0.76.2
+  - Updated prometheus to v2.54.1
+  - Updated x509-exporter to v3.17.0
+  - Updated mimir to v2.14.0
+  - Updated minio to version RELEASE.2024-10-13T13-34-11Z
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) 📦 core module: [**v4.0.0**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v4.0.0)
+  - Updated opensearch and opensearch-dashboards to v2.17.1
+  - Updated logging-operator to v4.10.0
+  - Updated loki to v2.9.10
+  - Updated minio to version RELEASE.2024-10-13T13-34-11Z
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) 📦 core module: [**v3.0.1**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v3.0.1)
+  - Updated cert-manager to v1.16.1
+  - Updated external-dns to v0.15.0
+  - Updated forecastle to v1.0.145
+  - Updated nginx to v1.11.3
+- [auth](https://github.com/sighupio/fury-kubernetes-auth) 📦 core module: [**v0.4.0**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/v0.4.0)
+  - Updated dex to v2.41.1
+  - Updated pomerium to v0.27.1
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) 📦 core module: [**v3.0.0**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v3.0.0)
+  - Updated velero to v1.15.0
+  - Updated all velero plugins to v1.11.0
+  - Added snapshot-controller v8.0.1
+- [tracing](https://github.com/sighupio/fury-kubernetes-tracing) 📦 core module: [**v1.1.0**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/v1.1.0)
+  - Updated tempo to v2.6.0
+  - Updated minio to version RELEASE.2024-10-13T13-34-11Z
+- [opa](https://github.com/sighupio/fury-kubernetes-opa) 📦 core module: [**v1.13.0**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.13.0)
+  - Updated gatekeeper to v3.17.1
+  - Updated gatekeeper-policy-manager to v1.0.13
+  - Updated kyverno to v1.12.6
+- [aws](https://github.com/sighupio/fury-kubernetes-aws) 📦 module: [**v4.3.0**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/v4.3.0)
+  - Updated cluster-autoscaler to v1.30.0
+  - Updated snapshot-controller to v8.1.0
+  - Updated aws-load-balancer-controller to v2.10.0
+  - 
Updated node-termination-handler to v1.22.0
+
+## Breaking changes 💔
+
+- **Loki store and schema change**: A new store and schema have been introduced in order to improve the efficiency, speed and scalability of Loki clusters. See "[New features](#new-features-)" below for more details.
+- **DR schema change**: A new format for the schedule customization has been introduced to improve usability. See "[New Features](#new-features-)" section below for more details.
+- **Kyverno validation failure action**: Kyverno has deprecated `audit` and `enforce` as valid options for the `validationFailureAction`; valid options are now `Audit` and `Enforce`, in title case. Adjust your `.spec.distribution.modules.policy.kyverno.validationFailureAction` value accordingly.
+
+## New features 🌟
+
+- **New option for Logging**: Loki's configuration has been extended to accommodate a new `tsdbStartDate` **required** option to allow a migration towards TSDB and schema v13 storage (note: **this is a breaking change**):
+
+  ```yaml
+  ...
+  spec:
+    distribution:
+      modules:
+        logging:
+          loki:
+            tsdbStartDate: "2024-11-18"
+  ...
+  ```
+
+  - `tsdbStartDate` (**required**): a string in `ISO 8601` date format that represents the day starting from which Loki will record logs with the new store and schema.
+
+  ℹ️ **Note**: Loki will assume the start of the day on the UTC midnight of the specified day.
+
+- **Improved configurable schedules for DR backups**: the schedule configuration has been updated to enhance the usability of schedule customization (note: **this is a breaking change**):
+
+  ```yaml
+  ...
+  spec:
+    distribution:
+      modules:
+        dr:
+          velero:
+            schedules:
+              install: true
+              definitions:
+                manifests:
+                  schedule: "*/15 * * * *"
+                  ttl: "720h0m0s"
+                full:
+                  schedule: "0 1 * * *"
+                  ttl: "720h0m0s"
+                  snapshotMoveData: false
+  ...
+  ```
+
+- **DR snapshotMoveData options for full schedule**: a new parameter has been introduced in the velero `full` schedule to enable the `snapshotMoveData` feature. This feature allows data captured from a snapshot to be copied to the object storage location. **Important**: Setting this parameter to `true` will cause Velero to upload all data from the snapshotted volumes to S3 using Kopia. While backups are deduplicated, significant storage usage is still expected. To enable this, use the following parameter in the full schedule configuration:
+
+  ```yaml
+  ...
+  spec:
+    distribution:
+      modules:
+        dr:
+          velero:
+            schedules:
+              install: true
+              definitions:
+                full:
+                  snapshotMoveData: true
+  ...
+  ```
+
+A general example to enable Volume Snapshotting on rook-ceph (from our storage add-on module):
+
+  ```yaml
+  apiVersion: snapshot.storage.k8s.io/v1
+  kind: VolumeSnapshotClass
+  metadata:
+    name: velero-snapclass
+    labels:
+      velero.io/csi-volumesnapshot-class: "true"
+  driver: rook-ceph.rbd.csi.ceph.com
+  parameters:
+    clusterID: rook-ceph
+    csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
+    csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
+  deletionPolicy: Retain
+  ```
+
+`deletionPolicy: Retain` is important because if the volume snapshot is deleted from the namespace, the cluster-wide `VolumeSnapshotContent` CR will be preserved, maintaining the snapshot on the storage that the cluster is using.
+
+**NOTE**: For the EKSCluster provider, a default VolumeSnapshotClass is created automatically.
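+
+As an illustration, once a `VolumeSnapshotClass` like the one above is in place, a snapshot of a volume can be requested with a standard `VolumeSnapshot` object. This is a minimal sketch using the upstream `snapshot.storage.k8s.io/v1` API; the PVC name and namespace below are hypothetical placeholders, not values from the distribution:
+
+  ```yaml
+  apiVersion: snapshot.storage.k8s.io/v1
+  kind: VolumeSnapshot
+  metadata:
+    name: my-data-snapshot   # hypothetical snapshot name
+    namespace: my-app        # hypothetical namespace of the PVC
+  spec:
+    volumeSnapshotClassName: velero-snapclass  # the class from the example above
+    source:
+      persistentVolumeClaimName: my-data       # hypothetical PVC to snapshot
+  ```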
+
+- **DR optional snapshot-controller installation**: To leverage VolumeSnapshots on the OnPremises and KFDDistribution providers, a new option on velero has been added to install the snapshot-controller component. Before activating this parameter, make sure that no other snapshot-controller component is already deployed in your cluster. By default this parameter is `false`.
+
+  ```yaml
+  ...
+  spec:
+    distribution:
+      modules:
+        dr:
+          velero:
+            snapshotController:
+              install: true
+  ...
+  ```
+
+- **Prometheus ScrapeConfigs**: the Monitoring module now enables the `ScrapeConfig` CRD from the Prometheus Operator by default. All the `ScrapeConfig` objects present in the cluster will now be detected by the operator. `ScrapeConfig` objects are used to instruct Prometheus to scrape specific endpoints that could be outside the cluster.
+
+- **Components Hardening**: we hardened the security context of several components, improving the out-of-the-box security of the distribution.
+
+- **On-premises minimal clusters**: it is now possible to create clusters with only control-plane nodes, for minimal cluster installations that need to handle minimal workloads.
+
+- **Helm Plugins**: Helm plugins now allow disabling validation at installation time with the `disableValidationOnInstall` option. This can be useful when installing Helm charts that fail the diff step on a first installation, for example.
+
+- **Network Policies** (experimental 🧪): a new experimental feature is introduced in this version. You can now enable the installation of network policies that will restrict the traffic across all the infrastructural namespaces of KFD to just the access needed for its proper functioning, denying the rest and improving the overall security of the cluster. This experimental feature is only available on OnPremises clusters at the moment. Read more in the [Pull Request](https://github.com/sighupio/fury-distribution/pull/302) introducing the feature and in the [related documentation](https://github.com/sighupio/fury-distribution/tree/main/docs/network-policies).
+
+- **Global CVE patched images for core modules**: This distribution version includes images that have been patched for OS vulnerabilities (CVEs). To use these patched images, set the following option:
+
+  ```yaml
+  ...
+  spec:
+    distribution:
+      common:
+        registry: registry.sighup.io/fury-secured
+  ...
+  ```
+
+## Fixes 🐞
+
+- Improved Configuration Schema documentation: documentation for the configuration schemas was lacking; we greatly improved the quality and quantity of the documentation regarding each option in the schemas, for all the configuration kinds (OnPremises, EKSCluster, KFDDistribution).
+- [[#264](https://github.com/sighupio/fury-distribution/pull/264)] Hubble UI: it is now shown in the right group in the Directory.
+- [[#277](https://github.com/sighupio/fury-distribution/pull/277)] Hubble UI: make it work when auth type is SSO.
+- [[#275](https://github.com/sighupio/fury-distribution/pull/275)] On-premises: use the `org` parameter for the additional users created; it was being ignored before.
+- [[#279](https://github.com/sighupio/fury-distribution/pull/279)] Monitoring: don't install the x509 exporter on the data plane of EKS clusters because it is not needed there and triggers false alerts.
+- [[#280](https://github.com/sighupio/fury-distribution/pull/280)] Migrations: fix the migration of the Auth type from `sso` to `basicAuth` and vice versa.
+- [[#281](https://github.com/sighupio/fury-distribution/pull/281)] Migrations: some ingresses were not being deleted when migrating to Ingress type `none`.
+- [[#281](https://github.com/sighupio/fury-distribution/pull/281)] Ingress: don't create the TLS secret when the ingress type is `none`.
+- [[#283](https://github.com/sighupio/fury-distribution/pull/283)] EKS schema validation: fix DNS validation depending on whether nginx is single, dual or none.
+- [[#291](https://github.com/sighupio/fury-distribution/pull/291)] Monitoring: `minio-monitoring` ingress is now working when SSO is enabled.
+- [[#291](https://github.com/sighupio/fury-distribution/pull/291)] Tracing: `minio-tracing` ingress is now created when Logging type is `none` and `auth.type` is `sso`.
+- [[#293](https://github.com/sighupio/fury-distribution/pull/293)] Monitoring migrations: remove `minio-monitoring` ingress when migrating monitoring type from `mimir` to `none`.
+- [[#301](https://github.com/sighupio/fury-distribution/pull/301)] Migrations: fix an error in the concatenation of kustomize bases. `external-dns` and `opensearch` are properly deleted now and no components are left behind.
+- [[#310](https://github.com/sighupio/fury-distribution/pull/310)] Migrations: fix an error while migrating from auth type `none` to `sso` related to old ingresses not being deleted first.
+
+## Upgrade procedure
+
+Check the [upgrade docs](https://docs.kubernetesfury.com/docs/upgrades/upgrades) for the detailed procedure.
diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md
index 89a4bee81..e4a0eed68 100644
--- a/docs/schemas/ekscluster-kfd-v1alpha2.md
+++ b/docs/schemas/ekscluster-kfd-v1alpha2.md
@@ -2,8 +2,14 @@

This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service.

-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+
+```bash
+furyctl create config --kind EKSCluster --version v1.28.5 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.

## Properties

| Property | Type | Required |
|:---------------------------|:---------|:---------|
| [apiVersion](#apiversion) | `string` | Required |
| [kind](#kind) | `string` | Required |
| [metadata](#metadata) | `object` | Required |
| [spec](#spec) | `object` | Required |

### Description

A Fury Cluster deployed through AWS's Elastic Kubernetes Service.

## .apiVersion

### Constraints

**maximum length**: the maximum number of characters for this string is: `200`

**minimum length**: the minimum number of characters for this string is: `1`

## .kind

### Constraints

**enum**: the value of this property must be equal to one of the following values:

| Value |
|:-------------|
|`"EKSCluster"`|

## .metadata

### Properties

| Property | Type | Required |
|:-------------------------|:---------|:---------|
| [name](#metadataname) | `string` | Required |

## .metadata.name

### Description

+ ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -92,11 +102,15 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. + ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -110,21 +124,21 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider, must be EKS if specified +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). -NOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too. +NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. ## .spec.distribution.common.relativeVendorPath ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -139,13 +153,19 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -163,7 +183,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -210,7 +230,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -418,7 +438,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -525,11 +545,15 @@ The type of the secret | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. 
+ ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -542,17 +566,32 @@ The base domain for the auth module | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -588,7 +627,7 @@ Dex signing key expiration time duration (default 6h). ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -603,13 +642,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -627,7 +666,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -650,13 +689,21 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses +### Description + +Override the definition of the Auth module ingresses. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations @@ -671,13 +718,13 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. 
## .spec.distribution.modules.auth.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -695,7 +742,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -791,7 +838,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -805,7 +852,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -895,27 +942,36 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -969,7 +1025,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations @@ -984,13 +1040,13 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. 
## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1008,7 +1064,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1055,7 +1111,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations @@ -1070,13 +1126,13 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1094,7 +1150,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1128,7 +1184,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations @@ -1143,13 +1199,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1167,7 +1223,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1214,7 +1270,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations @@ -1229,13 +1285,13 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. 
## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1253,7 +1309,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1276,13 +1332,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesawsoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesawsoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.aws.overrides.ingresses ## .spec.distribution.modules.aws.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations @@ -1297,13 +1357,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1321,7 +1381,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1344,6 +1404,10 @@ The value of the toleration | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. + ## .spec.distribution.modules.dr.overrides ### Properties @@ -1354,13 +1418,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations @@ -1375,13 +1443,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.dr.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1399,7 +1467,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1416,11 +1484,13 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***eks*** +The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------| @@ -1435,6 +1505,7 @@ The type of the DR, must be ***none*** or ***eks*** |:-------------------------------------------------------|:---------|:---------| | [eks](#specdistributionmodulesdrveleroeks) | `object` | Required | | [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | +| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | ## .spec.distribution.modules.dr.velero.eks @@ -1449,17 +1520,17 @@ The type of the DR, must be ***none*** or ***eks*** ### Description -The name of the velero bucket +The name of the bucket for Velero. ## .spec.distribution.modules.dr.velero.eks.region ### Description -The region where the velero bucket is located +The region where the bucket for Velero will be located. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -1506,7 +1577,7 @@ The region where the velero bucket is located ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1521,13 +1592,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
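Putting the DR fields above together, a hedged sketch of an `eks` Disaster Recovery configuration (the bucket name is a placeholder):

```yaml
spec:
  distribution:
    modules:
      dr:
        type: eks   # `none` would disable the module
        velero:
          eks:
            bucketName: example-velero-backups   # placeholder bucket name
            region: eu-west-1                    # the region where the bucket will be located
```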
## .spec.distribution.modules.dr.velero.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1545,7 +1616,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1558,6 +1629,95 @@ The key of the toleration The value of the toleration +## .spec.distribution.modules.dr.velero.schedules + +### Properties + +| Property | Type | Required | +|:--------------------------------------------------------------------|:----------|:---------| +| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional | +| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional | + +### Description + +Configuration for Velero's backup schedules. + +## .spec.distribution.modules.dr.velero.schedules.definitions + +### Properties + +| Property | Type | Required | +|:---------------------------------------------------------------------------|:---------|:---------| +| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional | +| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional | + +### Description + +Configuration for Velero schedules. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full + +### Properties + +| Property | Type | Required | +|:---------------------------------------------------------------------------------------------|:----------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional | +| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional | + +### Description + +Configuration for Velero's full backup schedule. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule + +### Description + +The cron expression for the `full` backup schedule (default `0 1 * * *`). + +## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData + +### Description + +EXPERIMENTAL (if you take more than one backup, the backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl + +### Description + +The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
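As an illustration of the `full` schedule fields just described, a hedged sketch using the default values:

```yaml
spec:
  distribution:
    modules:
      dr:
        velero:
          schedules:
            definitions:
              full:
                schedule: "0 1 * * *"     # cron expression, default: daily at 01:00
                ttl: 720h0m0s             # 30 days, the default; only affects newly created backups
                snapshotMoveData: false   # EXPERIMENTAL, see the caveat above before enabling
```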
+ +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests + +### Properties + +| Property | Type | Required | +|:----------------------------------------------------------------------------------|:---------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional | + +### Description + +Configuration for Velero's manifests backup schedule. + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule + +### Description + +The cron expression for the `manifests` backup schedule (default `*/15 * * * *`). + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl + +### Description + +The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. + +## .spec.distribution.modules.dr.velero.schedules.install + +### Description + +Whether to install the default `manifests` and `full` backup schedules. Default is `true`. + ## .spec.distribution.modules.ingress ### Properties | Property | Type | Required | |:----------------------------------------------------------|:---------|:---------| | [baseDomain](#specdistributionmodulesingressbasedomain) | `string` | Required | | [certManager](#specdistributionmodulesingresscertmanager) | `object` | Optional | -| [dns](#specdistributionmodulesingressdns) | `object` | Required | +| [dns](#specdistributionmodulesingressdns) | `object` | Optional | | [forecastle](#specdistributionmodulesingressforecastle) | `object` | Optional | | [nginx](#specdistributionmodulesingressnginx) | `object` | Required | | [overrides](#specdistributionmodulesingressoverrides) | `object` | Optional | @@ -1575,7 +1735,7 @@ The value of the toleration ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD infrastructural ingresses. When using the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone. ## .spec.distribution.modules.ingress.certManager ### Properties | Property | Type | Required | |:--------------------------------------------------------------------------|:---------|:---------| | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`: cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties | Property | Type | Required | |:---------------------------------------------------------------------------|:---------|:---------| | [email](#specdistributionmodulesingresscertmanagerclusterissueremail) | `string` | Required | | [name](#specdistributionmodulesingresscertmanagerclusterissuername) | `string` | Required | | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.
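Before the field-by-field reference that follows, a hedged sketch of what this clusterIssuer configuration might look like (name and email are placeholders):

```yaml
spec:
  distribution:
    modules:
      ingress:
        certManager:
          clusterIssuer:
            name: letsencrypt-example   # placeholder name
            email: ops@example.com      # placeholder address used during certificate issuing
            type: http01                # or `dns01` for the DNS challenge
```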
+ ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +The name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***dns01*** or ***http01*** +The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1643,7 +1811,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1658,13 +1826,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1682,7 +1850,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1702,8 +1870,12 @@ The value of the toleration | Property | Type | Required | |:---------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesingressdnsoverrides) | `object` | Optional | -| [private](#specdistributionmodulesingressdnsprivate) | `object` | Required | -| [public](#specdistributionmodulesingressdnspublic) | `object` | Required | +| [private](#specdistributionmodulesingressdnsprivate) | `object` | Optional | +| [public](#specdistributionmodulesingressdnspublic) | `object` | Optional | + +### Description + +DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission. ## .spec.distribution.modules.ingress.dns.overrides @@ -1718,7 +1890,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.ingress.dns.overrides.tolerations @@ -1733,13 +1905,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1757,7 +1929,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1779,17 +1951,21 @@ The value of the toleration | [create](#specdistributionmodulesingressdnsprivatecreate) | `boolean` | Required | | [name](#specdistributionmodulesingressdnsprivatename) | `string` | Required | +### Description + +The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone. + ## .spec.distribution.modules.ingress.dns.private.create ### Description -If true, the private hosted zone will be created +By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead. ## .spec.distribution.modules.ingress.dns.private.name ### Description -The name of the private hosted zone +The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. ## .spec.distribution.modules.ingress.dns.public @@ -1804,13 +1980,13 @@ The name of the private hosted zone ### Description -If true, the public hosted zone will be created +By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead. ## .spec.distribution.modules.ingress.dns.public.name ### Description -The name of the public hosted zone +The name of the public hosted zone. ## .spec.distribution.modules.ingress.forecastle @@ -1833,7 +2009,7 @@ The name of the public hosted zone ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1848,13 +2024,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1872,7 +2048,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1897,7 +2073,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. 
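To recap the DNS fields documented above, a hedged sketch (the zone names are placeholders):

```yaml
spec:
  distribution:
    modules:
      ingress:
        dns:
          public:
            name: fury-demo.sighup.io            # placeholder public zone
            create: false   # look up the existing zone with a Terraform data source
          private:
            name: internal.fury-demo.sighup.io   # placeholder private zone, used with the nginx `dual` type
            create: true    # create the zone instead of looking it up
```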
## .spec.distribution.modules.ingress.nginx.overrides @@ -1912,7 +2088,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1927,13 +2103,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1951,7 +2127,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1977,11 +2153,11 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1999,25 +2175,42 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses' TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type. + +Default is `single`.
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2035,6 +2228,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -2057,25 +2254,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -2090,13 +2287,13 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2114,7 +2311,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2142,6 +2339,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -2150,6 +2351,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -2163,7 +2368,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -2178,13 +2383,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2202,7 +2407,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2232,55 +2437,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. 
This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -2291,12 +2496,21 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional | | [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional | | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | +| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | + +### Description + +Configuration for the Loki package. ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. 
+ ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2315,35 +2529,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. + ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -2367,13 +2585,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2388,13 +2606,23 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. + +## .spec.distribution.modules.logging.loki.tsdbStartDate + +### Description + +Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database it uses to store the logs from BoltDB to TSDB, and the schema from v11 to v13. + +The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB database and schema will be kept for reading purposes until they expire. + +Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`. ## .spec.distribution.modules.logging.minio ### Properties | Property | Type | Required | |:-------------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingminiooverrides) | `object` | Optional | | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment.
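For the Loki options documented above, a hedged sketch pointing Loki at an external S3-compatible bucket (endpoint, credentials, bucket name, and date are placeholders):

```yaml
spec:
  distribution:
    modules:
      logging:
        loki:
          backend: externalEndpoint   # instead of the in-cluster `minio` deployment
          externalEndpoint:
            endpoint: s3.example.com            # placeholder endpoint
            bucketName: example-loki-logs       # placeholder bucket
            accessKeyId: EXAMPLEACCESSKEY       # placeholder credential
            secretAccessKey: exampleSecretKey   # placeholder credential
            insecure: false                     # keep HTTPS
          tsdbStartDate: "2024-11-18"   # ISO 8601 date; example value from the schema
```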
+ ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2419,7 +2651,7 @@ The memory request for the opensearch pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2434,13 +2666,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2458,7 +2690,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2484,19 +2716,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2522,7 +2754,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2537,13 +2769,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2561,7 +2793,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2596,13 +2828,13 @@ The value of the toleration ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2617,29 +2849,29 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. 
## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. Follows the Kubernetes resource storage request format. Default is `150Gi`. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replica deployment. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2654,6 +2886,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. + ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2667,7 +2903,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2682,13 +2918,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2706,7 +2942,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2729,13 +2965,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations @@ -2750,13 +2990,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module.
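To recap the OpenSearch options documented earlier in this section, a hedged sizing sketch reusing the example values from the schema:

```yaml
spec:
  distribution:
    modules:
      logging:
        opensearch:
          type: triple         # HA 3-replica deployment; use `single` for one replica
          storageSize: 150Gi   # the default
          resources:
            requests:
              cpu: 500m
              memory: 500M
            limits:
              cpu: 1000m
              memory: 1G
```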
## .spec.distribution.modules.logging.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2774,7 +3014,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2791,11 +3031,17 @@ The value of the toleration ### Description -selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Selects the logging stack. Options are: +- `none`: will disable the centralized logging. +- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. +- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage; you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. + +Default is `opensearch`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2824,7 +3070,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Description -configuration for the Monitoring module components +Configuration for the Monitoring module. ## .spec.distribution.modules.monitoring.alertmanager ### Properties | Property | Type | Required | |:-------------------| @@ -2840,19 +3086,19 @@ configuration for the Monitoring module components ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send the dead man's switch monitoring alerts to, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to `false` to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL to send the infrastructural and workload alerts to. ## .spec.distribution.modules.monitoring.blackboxExporter @@ -2875,7 +3121,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2890,13 +3136,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package.
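For the Alertmanager options documented above, a hedged sketch (the webhook URLs are placeholders, and the field names are taken from the anchors in this section):

```yaml
spec:
  distribution:
    modules:
      monitoring:
        alertmanager:
          deadManSwitchWebhookUrl: https://hc-ping.com/example-uuid   # placeholder URL
          slackWebhookUrl: https://hooks.slack.com/services/EXAMPLE   # placeholder URL
          installDefaultRules: true   # keep the distribution's alert rules
```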
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2914,7 +3160,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2958,7 +3204,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -2973,13 +3219,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2997,7 +3243,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3043,7 +3289,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -3058,13 +3304,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3082,7 +3328,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3106,15 +3352,19 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3133,35 +3383,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +The external S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -3176,7 +3430,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations @@ -3191,13 +3445,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3215,7 +3469,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3232,7 +3486,7 @@ The value of the toleration ### Description -The retention time for the mimir pods +The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days. ## .spec.distribution.modules.monitoring.minio ### Properties | Property | Type | Required | |:-------------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesmonitoringminiooverrides) | `object` | Optional | | [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional | +### Description + +Configuration for Monitoring's MinIO deployment.
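Tying the Mimir options above together, a hedged sketch using the in-cluster MinIO backend (assuming the monitoring `type`, documented further below, is set to `mimir`):

```yaml
spec:
  distribution:
    modules:
      monitoring:
        type: mimir
        mimir:
          backend: minio       # in-cluster MinIO deployment; or `externalEndpoint`
          retentionTime: 30d   # the default; must match [0-9]+(ns|us|µs|ms|s|m|h|d|w|y)
```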
+ ## .spec.distribution.modules.monitoring.minio.overrides ### Properties @@ -3257,7 +3515,7 @@ The retention time for the mimir pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations @@ -3272,13 +3530,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3296,7 +3554,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3322,19 +3580,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -3346,13 +3604,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -3367,13 +3629,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3391,7 +3653,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3446,13 +3708,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. 
## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3467,31 +3729,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the k8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3532,13 +3794,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3553,28 +3815,30 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. -- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +- `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when you have a centralized (remote) Prometheus to ship the metrics to instead of storing them locally in the cluster.
+- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3604,7 +3868,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3619,13 +3883,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3643,7 +3907,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3665,20 +3929,31 @@ The value of the toleration | [overrides](#specdistributionmodulesnetworkingoverrides) | `object` | Optional | | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.overrides ### Properties | Property | Type | Required | |:------------------------------------------------------------------------|:---------|:---------| +| [ingresses](#specdistributionmodulesnetworkingoverridesingresses) | `object` | Optional | | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + +## .spec.distribution.modules.networking.overrides.ingresses + ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations @@ -3693,13 +3968,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the module. 
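To illustrate the `overrides` pattern that this and the following sections document, here is a hedged sketch of a module-level override block; the `infra` node role label and toleration are example values borrowed from the common tolerations example used elsewhere in these docs, to adapt to your cluster:

```yaml
overrides:
  nodeSelector:
    node.kubernetes.io/role: infra   # placeholder label
  tolerations:
    - effect: NoSchedule
      key: node.kubernetes.io/role
      value: infra
```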
## .spec.distribution.modules.networking.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3717,7 +3992,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3751,7 +4026,7 @@ The value of the toleration

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations

@@ -3766,13 +4041,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3790,7 +4065,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3814,6 +4089,10 @@ The value of the toleration

| [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional |
| [type](#specdistributionmodulespolicytype) | `string` | Required |

+### Description
+
+Configuration for the Policy module.
+
## .spec.distribution.modules.policy.gatekeeper

### Properties

| Property | Type | Required |
|:------------------------------------------------------------------------------------------|:----------|:---------|
| [additionalExcludedNamespaces](#specdistributionmodulespolicygatekeeperadditionalexcludednamespaces) | `array` | Optional |
| [enforcementAction](#specdistributionmodulespolicygatekeeperenforcementaction) | `string` | Required |
| [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required |
| [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional |

+### Description
+
+Configuration for the Gatekeeper package.
+
## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces

### Description

@@ -3835,11 +4118,11 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en

### Description

-The enforcement action to use for the gatekeeper module
+The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found; `warn` will show a message to the user but will admit the violating requests; and `dryrun` won't give any feedback to the user but will log the violations.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3851,7 +4134,7 @@

### Description

-If true, the default policies will be installed
+Set to `false` to avoid installing the default Gatekeeper policies (constraint templates and constraints) included with the distribution.
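Putting the Gatekeeper fields above together, a minimal illustrative configuration of the Policy module could look like the following sketch; `my-namespace` is a placeholder for a namespace you want to exempt:

```yaml
policy:
  type: gatekeeper
  gatekeeper:
    enforcementAction: deny
    installDefaultPolicies: true
    additionalExcludedNamespaces:
      - my-namespace   # placeholder namespace
```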
## .spec.distribution.modules.policy.gatekeeper.overrides

@@ -3866,7 +4149,7 @@ If true, the default policies will be installed

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations

@@ -3881,13 +4164,13 @@ If true, the default policies will be installed

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3905,7 +4188,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3929,17 +4212,21 @@ The value of the toleration

| [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional |
| [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required |

+### Description
+
+Configuration for the Kyverno package.
+
## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces

### Description

-This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them.
+This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them.

## .spec.distribution.modules.policy.kyverno.installDefaultPolicies

### Description

-If true, the default policies will be installed
+Set to `false` to avoid installing the default Kyverno policies included with the distribution.

## .spec.distribution.modules.policy.kyverno.overrides

@@ -3954,7 +4241,7 @@ If true, the default policies will be installed

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.policy.kyverno.overrides.tolerations

@@ -3969,13 +4256,13 @@ If true, the default policies will be installed

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3993,7 +4280,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -4010,16 +4297,16 @@ The value of the toleration

### Description

-The validation failure action to use for the kyverno module
+The validation failure action to use for the policies: `Enforce` will block a request that does not comply with the policies, while `Audit` will log the violation without blocking the request.
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| -|`"audit"` | -|`"enforce"`| +|`"Audit"` | +|`"Enforce"`| ## .spec.distribution.modules.policy.overrides @@ -4031,13 +4318,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -4052,13 +4343,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4076,7 +4367,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4093,11 +4384,13 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -4116,6 +4409,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -4126,6 +4423,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -4139,7 +4440,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -4154,13 +4455,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4178,7 +4479,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4204,19 +4505,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.tracing.overrides @@ -4228,13 +4529,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -4249,13 +4554,13 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4273,7 +4578,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4297,15 +4602,19 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO deployment for object storage, while `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -4324,35 +4633,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***

| [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional |

+### Description
+
+Configuration for Tempo's external storage backend.
+
## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId

### Description

-The access key id of the external tempo backend
+The access key ID (username) for the external S3-compatible bucket.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName

### Description

-The bucket name of the external tempo backend
+The bucket name of the external S3-compatible object storage.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint

### Description

-The endpoint of the external tempo backend
+The external S3-compatible endpoint for Tempo's storage.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure

### Description

-If true, the external tempo backend will not use tls
+If `true`, HTTP will be used as the protocol instead of HTTPS.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey

### Description

-The secret access key of the external tempo backend
+The secret access key (password) for the external S3-compatible bucket.

## .spec.distribution.modules.tracing.tempo.overrides

@@ -4367,7 +4680,7 @@ The secret access key of the external tempo backend

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.tracing.tempo.overrides.tolerations

@@ -4382,13 +4695,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -4406,7 +4719,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -4423,17 +4736,19 @@ The value of the toleration

### Description

-The retention time for the tempo pods
+The retention time for the traces stored in Tempo.

## .spec.distribution.modules.tracing.type

### Description

-The type of tracing to use, either ***none*** or ***tempo***
+The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.
+
+Default is `tempo`.
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:--------|
@@ -4442,6 +4757,10 @@ The type of tracing to use, either ***none*** or ***tempo***

## .spec.distributionVersion

+### Description
+
+Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.
+
### Constraints

**minimum length**: the minimum number of characters for this string is: `1`

@@ -4465,7 +4784,7 @@

### Description

-This key defines the VPC that will be created in AWS
+Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.

## .spec.infrastructure.vpc.network

@@ -4480,7 +4799,7 @@

### Description

-This is the CIDR of the VPC that will be created
+The network CIDR for the VPC that will be created

### Constraints

@@ -4501,11 +4820,15 @@ This is the CIDR of the VPC that will be created

| [private](#specinfrastructurevpcnetworksubnetscidrsprivate) | `array` | Required |
| [public](#specinfrastructurevpcnetworksubnetscidrspublic) | `array` | Required |

+### Description
+
+Network CIDRs configuration for private and public subnets.
+
## .spec.infrastructure.vpc.network.subnetsCidrs.private

### Description

-These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created
+The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created

### Constraints

@@ -4521,7 +4844,7 @@ These are the CIRDs for the private subnets, where the nodes, the pods, and the

### Description

-These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created
+The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created

### Constraints

@@ -4553,31 +4876,31 @@ These are the CIDRs for the public subnets, where the public load balancers and

### Description

-This section defines the creation of VPN bastions
+Configuration for the VPN server instances.

## .spec.infrastructure.vpn.bucketNamePrefix

### Description

-This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states
+This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users).

## .spec.infrastructure.vpn.dhParamsBits

### Description

-The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file
+The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file.

## .spec.infrastructure.vpn.diskSize

### Description

-The size of the disk in GB
+The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB.

## .spec.infrastructure.vpn.iamUserNameOverride

### Description

-Overrides the default IAM user name for the VPN
+Overrides the IAM user name for the VPN. Default is to use the cluster name.
### Constraints

@@ -4593,25 +4916,25 @@

### Description

-The size of the AWS EC2 instance
+The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`.

## .spec.infrastructure.vpn.instances

### Description

-The number of instances to create, 0 to skip the creation
+The number of VPN server instances to create, `0` to skip the creation.

## .spec.infrastructure.vpn.operatorName

### Description

-The username of the account to create in the bastion's operating system
+The username of the account to create in the bastion's operating system.

## .spec.infrastructure.vpn.port

### Description

-The port used by the OpenVPN server
+The port where each OpenVPN server will listen for connections.

## .spec.infrastructure.vpn.ssh

@@ -4627,7 +4950,7 @@ The port used by the OpenVPN server

### Description

-The CIDR enabled in the security group that can access the bastions in SSH
+The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.

### Constraints

@@ -4643,7 +4966,7 @@ The CIDR enabled in the security group that can access the bastions in SSH

### Description

-The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user
+List of GitHub usernames whose SSH public keys will be fetched from GitHub and added as authorized keys for the `operatorName` user.

### Constraints

@@ -4653,13 +4976,13 @@ The github user name list that will be used to get the ssh public key that will

### Description

-This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented
+**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system.

## .spec.infrastructure.vpn.vpcId

### Description

-The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted
+The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted.

### Constraints

@@ -4675,7 +4998,7 @@ The VPC ID where the VPN servers will be created, required only if .spec.infrast

### Description

-The CIDR that will be used to assign IP addresses to the VPN clients when connected
+The network CIDR that will be used to assign IP addresses to the VPN clients when connected.

### Constraints

@@ -4699,6 +5022,7 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec
| [logRetentionDays](#speckuberneteslogretentiondays) | `integer` | Optional |
| [logsTypes](#speckuberneteslogstypes) | `array` | Optional |
| [nodeAllowedSshPublicKey](#speckubernetesnodeallowedsshpublickey) | `object` | Required |
+| [nodePoolGlobalAmiType](#speckubernetesnodepoolglobalamitype) | `string` | Required |
| [nodePools](#speckubernetesnodepools) | `array` | Required |
| [nodePoolsLaunchKind](#speckubernetesnodepoolslaunchkind) | `string` | Required |
| [serviceIpV4Cidr](#speckubernetesserviceipv4cidr) | `string` | Optional |
@@ -4706,6 +5030,10 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec
| [vpcId](#speckubernetesvpcid) | `string` | Optional |
| [workersIAMRoleNamePrefixOverride](#speckubernetesworkersiamrolenameprefixoverride) | `string` | Optional |

+### Description
+
+Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.
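To tie the fields of this section together, here is a purely illustrative `.spec.kubernetes` skeleton; the SSH key, AMI type, node pool name, sizes and instance type are placeholders to adapt to your environment:

```yaml
kubernetes:
  apiServer:
    privateAccess: true
    publicAccess: false
  nodeAllowedSshPublicKey: "ssh-ed25519 AAAA... user@host"  # placeholder key
  nodePoolGlobalAmiType: alinux2
  nodePoolsLaunchKind: launch_templates
  nodePools:
    - name: workers            # placeholder name
      type: eks-managed
      size:
        min: 1
        max: 3
      instance:
        type: t3.large         # placeholder instance type
```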
+
## .spec.kubernetes.apiServer

### Properties

@@ -4721,13 +5049,13 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec

### Description

-This value defines if the API server will be accessible only from the private subnets
+This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`.

## .spec.kubernetes.apiServer.privateAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the private subnets
+The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4743,13 +5071,13 @@ This value defines the CIDRs that will be allowed to access the API server from

### Description

-This value defines if the API server will be accessible from the public subnets
+This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`.

## .spec.kubernetes.apiServer.publicAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the public subnets
+The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4771,11 +5099,17 @@ This value defines the CIDRs that will be allowed to access the API server from

| [roles](#speckubernetesawsauthroles) | `array` | Optional |
| [users](#speckubernetesawsauthusers) | `array` | Optional |

+### Description
+
+Optional additional security configuration for EKS IAM via the `aws-auth` configmap.
+
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
+
## .spec.kubernetes.awsAuth.additionalAccounts

### Description

-This optional array defines additional AWS accounts that will be added to the aws-auth configmap
+This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap.

## .spec.kubernetes.awsAuth.roles

@@ -4789,7 +5123,7 @@ This optional array defines additional AWS accounts that will be added to the aw

### Description

-This optional array defines additional IAM roles that will be added to the aws-auth configmap
+This optional array defines additional IAM roles that will be added to the `aws-auth` configmap.

## .spec.kubernetes.awsAuth.roles.groups

@@ -4819,7 +5153,7 @@ This optional array defines additional IAM roles that will be added to the aws-a

### Description

-This optional array defines additional IAM users that will be added to the aws-auth configmap
+This optional array defines additional IAM users that will be added to the `aws-auth` configmap.

## .spec.kubernetes.awsAuth.users.groups

@@ -4841,7 +5175,7 @@ This optional array defines additional IAM users that will be added to the aws-a

### Description

-Overrides the default IAM role name prefix for the EKS cluster
+Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name.

### Constraints

@@ -4857,7 +5191,37 @@ Overrides the default IAM role name prefix for the EKS cluster

### Description

-Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
+Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.
+
+### Constraints
+
+**enum**: the value of this property must be equal to one of the following integer values:
+
| Value |
|:----|
|0 |
|1 |
|3 |
|5 |
|7 |
|14 |
|30 |
|60 |
|90 |
|120 |
|150 |
|180 |
|365 |
|400 |
|545 |
|731 |
|1096|
|1827|
|2192|
|2557|
|2922|
|3288|
|3653|

## .spec.kubernetes.logsTypes

### Description

Optional list of Kubernetes Cluster log types to enable. Defaults to all types.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:--------------------|
@@ -4881,7 +5245,22 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types.

### Description

-This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user
+The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
+
+## .spec.kubernetes.nodePoolGlobalAmiType
+
+### Description
+
+Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool.
+
+### Constraints
+
+**enum**: the value of this property must be equal to one of the following string values:
+
+| Value |
+|:-------------|
+|`"alinux2"` |
+|`"alinux2023"`|

## .spec.kubernetes.nodePools

@@ -4900,7 +5279,11 @@

| [subnetIds](#speckubernetesnodepoolssubnetids) | `array` | Optional |
| [tags](#speckubernetesnodepoolstags) | `object` | Optional |
| [taints](#speckubernetesnodepoolstaints) | `array` | Optional |
-| [type](#speckubernetesnodepoolstype) | `string` | Optional |
+| [type](#speckubernetesnodepoolstype) | `string` | Required |
+
+### Description
+
+Array with all the node pool definitions that will join the cluster. Each item is an object.

## .spec.kubernetes.nodePools.additionalFirewallRules

### Properties

| Property | Type | Required |
|:------------------------------------------------------------------------|:---------|:---------|
| [cidrBlocks](#speckubernetesnodepoolsadditionalfirewallrulescidrblocks) | `array` | Optional |
| [self](#speckubernetesnodepoolsadditionalfirewallrulesself) | `array` | Optional |
| [sourceSecurityGroupId](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupid) | `array` | Optional |

+### Description
+
+Optional additional firewall rules that will be attached to the nodes.
+
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks

### Properties

| Property | Type | Required |
|:--------------------------------------------------------------------------------|:---------|:---------|
| ...

### Description

-The CIDR blocks for the FW rule.
At the moment the first item of the list will b | [from](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.to @@ -4976,11 +5369,19 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.tags +### Description + +Additional AWS tags for the Firewall rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.type +### Description + +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5008,7 +5409,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ### Description -The name of the FW rule +The name of the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports @@ -5019,6 +5420,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulesselfportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulesselfportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.to @@ -5027,7 +5432,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5043,23 +5448,23 @@ The protocol of the FW rule ### Description -If true, the source will be the security group itself +If `true`, the source will be the security group itself. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5087,7 +5492,7 @@ The type of the FW rule can be ingress or egress ### Description -The name of the FW rule +The name for the additional Firewall rule Security Group. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports @@ -5098,6 +5503,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. 
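For illustration, a hedged sketch of a single additional firewall rule of the `sourceSecurityGroupId` kind follows; the rule name, security group ID and port values are placeholders, and `TCP` is assumed to be an accepted protocol value:

```yaml
additionalFirewallRules:
  sourceSecurityGroupId:
    - name: allow-https-from-lb            # placeholder name
      type: ingress
      sourceSecurityGroupId: sg-0123456789abcdef0  # placeholder SG ID
      protocol: TCP
      ports:
        from: 443
        to: 443
```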
+
## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.from

## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.to

@@ -5106,7 +5515,7 @@

### Description

-The protocol of the FW rule
+The protocol of the Firewall rule.

### Constraints

@@ -5122,23 +5531,23 @@ The protocol of the FW rule

### Description

-The source security group ID
+The source security group ID.

## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.tags

### Description

-The tags of the FW rule
+Additional AWS tags for the Firewall rule.

## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.type

### Description

-The type of the FW rule can be ingress or egress
+The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:----------|
@@ -5151,26 +5560,48 @@ The type of the FW rule can be ingress or egress

| Property | Type | Required |
|:------------------------------------------|:---------|:---------|
-| [id](#speckubernetesnodepoolsamiid) | `string` | Required |
-| [owner](#speckubernetesnodepoolsamiowner) | `string` | Required |
+| [id](#speckubernetesnodepoolsamiid) | `string` | Optional |
+| [owner](#speckubernetesnodepoolsamiowner) | `string` | Optional |
+| [type](#speckubernetesnodepoolsamitype) | `string` | Optional |
+
+### Description
+
+Configuration to customize the Amazon Machine Image (AMI) for the machines of the Node Pool.
+
+The AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields for using a custom AMI (only with the `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.

## .spec.kubernetes.nodePools.ami.id

### Description

-The AMI ID to use for the nodes
+The ID of the AMI to use for the nodes, must be set together with the `owner` field. `ami.id` and `ami.owner` can only be set when the Node Pool type is `self-managed` and they can't be set at the same time as `ami.type`.

## .spec.kubernetes.nodePools.ami.owner

### Description

-The owner of the AMI
+The owner of the AMI to use for the nodes, must be set together with the `id` field. `ami.id` and `ami.owner` can only be set when the Node Pool type is `self-managed` and they can't be set at the same time as `ami.type`.
+
+## .spec.kubernetes.nodePools.ami.type
+
+### Description
+
+The AMI type defines the AMI to use for `eks-managed` and `self-managed` types of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at the same time as `ami.id` and `ami.owner`.
+
+### Constraints
+
+**enum**: the value of this property must be equal to one of the following string values:
+
+| Value |
+|:-------------|
+|`"alinux2"` |
+|`"alinux2023"`|

## .spec.kubernetes.nodePools.attachedTargetGroups

### Description

-This optional array defines additional target groups to attach to the instances in the node pool
+This optional array defines additional target groups to attach to the instances in the node pool.

### Constraints

@@ -5186,11 +5617,11 @@ This optional array defines additional target groups to attach to the instances

### Description

-The container runtime to use for the nodes
+The container runtime to use in the nodes of the node pool. Default is `containerd`.
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------|
@@ -5209,31 +5640,45 @@ The container runtime to use for the nodes

| [volumeSize](#speckubernetesnodepoolsinstancevolumesize) | `integer` | Optional |
| [volumeType](#speckubernetesnodepoolsinstancevolumetype) | `string` | Optional |

+### Description
+
+Configuration for the instances that will be used in the node pool.
+
## .spec.kubernetes.nodePools.instance.maxPods

+### Description
+
+Set the maximum pods per node to a custom value. If not set, the EKS default value for the instance type will be used.
+
+Ref: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt
+
## .spec.kubernetes.nodePools.instance.spot

### Description

-If true, the nodes will be created as spot instances
+If `true`, the nodes will be created as spot instances. Default is `false`.

## .spec.kubernetes.nodePools.instance.type

### Description

-The instance type to use for the nodes
+The instance type to use for the nodes.

## .spec.kubernetes.nodePools.instance.volumeSize

### Description

-The size of the disk in GB
+The size of the disk in GB.

## .spec.kubernetes.nodePools.instance.volumeType

+### Description
+
+Volume type for the instance disk. Default is `gp2`.
+
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-----------|
@@ -5246,13 +5691,13 @@ The size of the disk in GB

### Description

-Kubernetes labels that will be added to the nodes
+Kubernetes labels that will be added to the nodes.

## .spec.kubernetes.nodePools.name

### Description

-The name of the node pool
+The name of the node pool.

## .spec.kubernetes.nodePools.size

@@ -5267,19 +5712,19 @@ The name of the node pool

### Description

-The maximum number of nodes in the node pool
+The maximum number of nodes in the node pool.

## .spec.kubernetes.nodePools.size.min

### Description

-The minimum number of nodes in the node pool
+The minimum number of nodes in the node pool.

## .spec.kubernetes.nodePools.subnetIds

### Description

-This value defines the subnet IDs where the nodes will be created
+Optional list of subnet IDs where the nodes will be created.

### Constraints

@@ -5295,7 +5740,7 @@ This value defines the subnet IDs where the nodes will be created

### Description

-AWS tags that will be added to the ASG and EC2 instances
+AWS tags that will be added to the ASG and EC2 instances.

## .spec.kubernetes.nodePools.taints

@@ -5311,9 +5756,13 @@ AWS tags that will be added to the ASG and EC2 instances

## .spec.kubernetes.nodePools.type

+### Description
+
+The type of Node Pool: `self-managed` allows customizations like a custom AMI or setting the maximum pods per node, while `eks-managed` uses prebuilt AMIs from Amazon selected via the `ami.type` field. It is recommended to use `self-managed`.
+
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------------|
@@ -5324,11 +5773,11 @@ AWS tags that will be added to the ASG and EC2 instances

### Description

-Either `launch_configurations`, `launch_templates` or `both`.
For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim.
+Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`; when adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as an interim step.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:------------------------|
@@ -5340,7 +5789,7 @@

### Description

-This value defines the CIDR that will be used to assign IP addresses to the services
+This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services.

### Constraints

@@ -5356,7 +5805,7 @@

### Description

-This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created.

### Constraints

@@ -5372,7 +5821,7 @@

### Description

-This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created.

### Constraints

@@ -5388,7 +5837,7 @@

### Description

-Overrides the default IAM role name prefix for the EKS workers
+Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name.
### Constraints

@@ -5422,14 +5871,15 @@

### Properties

-| Property | Type | Required |
-|:-----------------------------------------------|:---------|:---------|
-| [chart](#specpluginshelmreleaseschart) | `string` | Required |
-| [name](#specpluginshelmreleasesname) | `string` | Required |
-| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
-| [set](#specpluginshelmreleasesset) | `array` | Optional |
-| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
-| [version](#specpluginshelmreleasesversion) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------|:----------|:---------|
+| [chart](#specpluginshelmreleaseschart) | `string` | Required |
+| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional |
+| [name](#specpluginshelmreleasesname) | `string` | Required |
+| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
+| [set](#specpluginshelmreleasesset) | `array` | Optional |
+| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
+| [version](#specpluginshelmreleasesversion) | `string` | Optional |

## .spec.plugins.helm.releases.chart

### Description

The chart of the release

+## .spec.plugins.helm.releases.disableValidationOnInstall
+
+### Description
+
+Disable running `helm diff` validation when installing the plugin; it will still be done when upgrading.
+
## .spec.plugins.helm.releases.name

### Description

@@ -5526,9 +5982,13 @@ The name of the kustomize plugin

## .spec.region

+### Description
+
+Defines in which AWS region the cluster and all the related resources will be created.
+
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-----------------|
@@ -5576,6 +6036,10 @@ This map defines which will be the common tags that will be added to all the res

|:----------------------------------------------|:---------|:---------|
| [terraform](#spectoolsconfigurationterraform) | `object` | Required |

+### Description
+
+Configuration for tools used by furyctl, like Terraform.
+
## .spec.toolsConfiguration.terraform

### Properties

@@ -5592,6 +6056,10 @@ This map defines which will be the common tags that will be added to all the res

|:----------------------------------------------|:---------|:---------|
| [s3](#spectoolsconfigurationterraformstates3) | `object` | Required |

+### Description
+
+Configuration for storing the Terraform state of the cluster.
+
## .spec.toolsConfiguration.terraform.state

### Properties

@@ -5603,17 +6071,21 @@ This map defines which will be the common tags that will be added to all the res

| [region](#spectoolsconfigurationterraformstates3region) | `string` | Required |
| [skipRegionValidation](#spectoolsconfigurationterraformstates3skipregionvalidation) | `boolean` | Optional |

+### Description
+
+Configuration for the S3 bucket used to store the Terraform state.
+
## .spec.toolsConfiguration.terraform.state.s3.bucketName

### Description

-This value defines which bucket will be used to store all the states
+This value defines which bucket will be used to store all the states.
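Combining the S3 state fields documented here, a minimal illustrative `toolsConfiguration` block could look like the following; the bucket name, key prefix and region are placeholders to replace with your own values:

```yaml
toolsConfiguration:
  terraform:
    state:
      s3:
        bucketName: my-terraform-states      # placeholder bucket
        keyPrefix: clusters/example-cluster  # placeholder prefix
        region: eu-west-1                    # placeholder region
```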
## .spec.toolsConfiguration.terraform.state.s3.keyPrefix ### Description -This value defines which folder will be used to store all the states inside the bucket +This value defines which folder will be used to store all the states inside the bucket. ### Constraints @@ -5631,11 +6103,11 @@ This value defines which folder will be used to store all the states inside the ### Description -This value defines in which region the bucket is located +This value defines in which region the bucket is located. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -5673,5 +6145,5 @@ This value defines in which region the bucket is located ### Description -This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region +This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region. diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 1098c9799..63db395b1 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +KFD modules deployed on top of an existing Kubernetes cluster. + ## .apiVersion ### Constraints @@ -29,7 +39,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -45,6 +55,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. + ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -84,11 +98,15 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. 
+ ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -102,13 +120,13 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. @@ -116,7 +134,7 @@ NOTE: If plugins are pulling from the default registry, the registry will be rep ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -131,13 +149,19 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -155,7 +179,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -202,7 +226,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -410,7 +434,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -493,7 +517,7 @@ The type of the secret ### Description -The kubeconfig file path +The path to the kubeconfig file. ## .spec.distribution.modules @@ -522,11 +546,15 @@ The kubeconfig file path | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. 
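As a purely illustrative sketch of the fields in this section, a minimal Auth module configuration protecting the infrastructural ingresses with HTTP basic auth could look like this; the domain and credentials are placeholders, and the available provider types are documented below:

```yaml
auth:
  baseDomain: auth.example.com   # placeholder domain
  provider:
    type: basicAuth
    basicAuth:
      username: admin            # placeholder credentials
      password: changeme
```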
## .spec.distribution.modules.auth.dex

### Properties

| Property | Type | Required |
|:-----------------------------------------------------------------------------------|:---------|:---------|
| [additionalStaticClients](#specdistributionmodulesauthdexadditionalstaticclients) | `array` | Optional |
| [connectors](#specdistributionmodulesauthdexconnectors) | `array` | Required |
| [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional |
| [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional |

+### Description
+
+Configuration for the Dex package.
+
## .spec.distribution.modules.auth.dex.additionalStaticClients

### Description

-The additional static clients for dex
+Additional static client definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:

```yaml
additionalStaticClients:
  - id: my-custom-client
    name: "A custom additional static client"
    redirectURIs:
      - "https://myapp.tld/redirect"
      - "https://alias.tld/oidc-callback"
    secret: supersecretpassword
```
Reference: https://dexidp.io/docs/connectors/local/

## .spec.distribution.modules.auth.dex.connectors

### Description

-The connectors for dex
+A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/

## .spec.distribution.modules.auth.dex.expiry

### Properties

| Property | Type | Required |
|:-----------------------------------------------------------------|:---------|:---------|
| [idTokens](#specdistributionmodulesauthdexexpiryidtokens) | `string` | Optional |
| [signingKeys](#specdistributionmodulesauthdexexpirysigningkeys) | `string` | Optional |

## .spec.distribution.modules.auth.dex.expiry.idTokens

### Description

Dex ID tokens expiration time duration (default 24h).

## .spec.distribution.modules.auth.dex.expiry.signingKeys

### Description

Dex signing key expiration time duration (default 6h).

## .spec.distribution.modules.auth.dex.overrides

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.auth.dex.overrides.tolerations

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.auth.dex.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ The value of the toleration

| [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the Auth module.
+
## .spec.distribution.modules.auth.overrides.ingresses

+### Description
+
+Override the definition of the Auth module ingresses.
+
## .spec.distribution.modules.auth.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the auth module
+Set to override the node selector used to place the pods of the Auth module.

## .spec.distribution.modules.auth.overrides.tolerations

### Description

-The tolerations that will be added to the pods for the auth module
+Set to override the tolerations that will be added to the pods of the Auth module.
## .spec.distribution.modules.auth.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -692,7 +743,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -788,7 +839,7 @@ override default routes for KFD components

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -802,7 +853,7 @@ override default routes for KFD components

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -892,27 +943,36 @@ cat ec_private.pem | base64

| [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required |
| [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required |

+### Description
+
+Configuration for the HTTP Basic Auth provider.
+
## .spec.distribution.modules.auth.provider.basicAuth.password

### Description

-The password for the basic auth
+The password for logging in with HTTP basic authentication.

## .spec.distribution.modules.auth.provider.basicAuth.username

### Description

-The username for the basic auth
+The username for logging in with HTTP basic authentication.

## .spec.distribution.modules.auth.provider.type

### Description

-The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
+The type of the Auth provider, options are:
+- `none`: will disable authentication in the infrastructural ingresses.
+- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.
+- `basicAuth`: will protect the infrastructural ingresses with HTTP basic authentication (username and password).
+
+Default is `none`.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:------------|
@@ -930,6 +990,10 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth***

| [type](#specdistributionmodulesdrtype) | `string` | Required |
| [velero](#specdistributionmodulesdrvelero) | `object` | Optional |

+### Description
+
+Configuration for the Disaster Recovery module.
+
## .spec.distribution.modules.dr.overrides

### Properties

@@ -940,13 +1004,17 @@

| [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.dr.overrides.ingresses

## .spec.distribution.modules.dr.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.dr.overrides.tolerations

@@ -961,13 +1029,13 @@ The node selector to use to place the pods for the security module

### Description

-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.dr.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -985,7 +1053,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1002,11 +1070,13 @@ The value of the toleration

### Description

-The type of the DR, must be ***none*** or ***on-premises***
+The type of the Disaster Recovery module, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.
+
+Default is `none`.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:--------------|
@@ -1017,22 +1087,27 @@

### Properties

-| Property | Type | Required |
-|:---------------------------------------------------------------------|:---------|:---------|
-| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional |
-| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional |
-| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional |
-| [retentionTime](#specdistributionmodulesdrveleroretentiontime) | `string` | Optional |
+| Property | Type | Required |
+|:-------------------------------------------------------------------------|:---------|:---------|
+| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional |
+| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional |
+| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional |
+| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional |
+| [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional |
+
+### Description
+
+Configuration for the Velero package.

## .spec.distribution.modules.dr.velero.backend

### Description

-The backend for velero
+The storage backend type for Velero. `minio` will use an in-cluster MinIO deployment for object storage, while `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1051,35 +1126,39 @@ The backend for velero

| [insecure](#specdistributionmodulesdrveleroexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulesdrveleroexternalendpointsecretaccesskey) | `string` | Optional |

+### Description
+
+Configuration for Velero's external storage backend.
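+
+A hypothetical sketch pointing Velero to an external S3-compatible bucket (all values are placeholders):
+
+```yaml
+velero:
+  backend: externalEndpoint
+  externalEndpoint:
+    endpoint: s3.example.com
+    bucketName: velero
+    accessKeyId: myaccesskey
+    secretAccessKey: mysecretkey
+    insecure: false
+```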
+
## .spec.distribution.modules.dr.velero.externalEndpoint.accessKeyId

### Description

-The access key id for velero backend
+The access key ID (username) for the external S3-compatible bucket.

## .spec.distribution.modules.dr.velero.externalEndpoint.bucketName

### Description

-The bucket name for velero backend
+The bucket name of the external S3-compatible object storage.

## .spec.distribution.modules.dr.velero.externalEndpoint.endpoint

### Description

-The endpoint for velero
+External S3-compatible endpoint for Velero's storage.

## .spec.distribution.modules.dr.velero.externalEndpoint.insecure

### Description

-If true, the endpoint will be insecure
+If true, will use HTTP as the protocol instead of HTTPS.

## .spec.distribution.modules.dr.velero.externalEndpoint.secretAccessKey

### Description

-The secret access key for velero backend
+The secret access key (password) for the external S3-compatible bucket.

## .spec.distribution.modules.dr.velero.overrides

@@ -1094,7 +1173,7 @@ The secret access key for velero backend

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.dr.velero.overrides.tolerations

@@ -1109,13 +1188,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.dr.velero.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1133,7 +1212,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1146,11 +1225,112 @@ The key of the toleration

 The value of the toleration

-## .spec.distribution.modules.dr.velero.retentionTime
+## .spec.distribution.modules.dr.velero.schedules
+
+### Properties
+
+| Property | Type | Required |
+|:--------------------------------------------------------------------|:----------|:---------|
+| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional |
+| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |

### Description

-The retention time for velero
+Configuration for Velero's backup schedules.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------------|:---------|:---------|
+| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional |
+| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional |
+
+### Description
+
+Definitions for Velero's backup schedules.
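+
+As a sketch, a `schedules` block using the default values described below:
+
+```yaml
+schedules:
+  install: true
+  definitions:
+    manifests:
+      schedule: "*/15 * * * *"
+      ttl: 720h0m0s
+    full:
+      schedule: "0 1 * * *"
+      ttl: 720h0m0s
+```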
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------------------|:----------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional |
+| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's `full` backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule
+
+### Description
+
+The cron expression for the `full` backup schedule (default `0 1 * * *`).
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData
+
+### Description
+
+EXPERIMENTAL (if you take more than one backup, the backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests
+
+### Properties
+
+| Property | Type | Required |
+|:----------------------------------------------------------------------------------|:---------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's `manifests` backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule
+
+### Description
+
+The cron expression for the `manifests` backup schedule (default `*/15 * * * *`).
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL.
+
+## .spec.distribution.modules.dr.velero.schedules.install
+
+### Description
+
+Whether or not to install the default `manifests` and `full` backup schedules. Default is `true`.
+
+## .spec.distribution.modules.dr.velero.snapshotController
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------|:----------|:---------|
+| [install](#specdistributionmodulesdrvelerosnapshotcontrollerinstall) | `boolean` | Optional |
+
+### Description
+
+Configuration for the additional snapshotController component installation.
+
+## .spec.distribution.modules.dr.velero.snapshotController.install
+
+### Description
+
+Whether or not to install the snapshotController component in the cluster. Before enabling this field, check that your CSI driver does not already have a built-in snapshotController.
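+
+For reference, a minimal hypothetical DR configuration enabling the snapshot controller:
+
+```yaml
+dr:
+  type: on-premises
+  velero:
+    backend: minio
+    snapshotController:
+      install: true
+```
+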
## .spec.distribution.modules.ingress

@@ -1168,7 +1348,7 @@ The retention time for velero

### Description

-the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone
+The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class.

## .spec.distribution.modules.ingress.certManager

### Properties

@@ -1179,6 +1359,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati

| [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required |
| [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional |

+### Description
+
+Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`; cert-manager is used to manage other certificates in the cluster besides the TLS termination certificates for the ingresses.
+
## .spec.distribution.modules.ingress.certManager.clusterIssuer

### Properties

@@ -1190,33 +1374,37 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati

| [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional |
| [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional |

+### Description
+
+Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.
+
## .spec.distribution.modules.ingress.certManager.clusterIssuer.email

### Description

-The email of the cluster issuer
+The email address to use during the certificate issuing process.

## .spec.distribution.modules.ingress.certManager.clusterIssuer.name

### Description

-The name of the cluster issuer
+The name of the clusterIssuer.

## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers

### Description

-The custom solvers configurations
+The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert-manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field.

## .spec.distribution.modules.ingress.certManager.clusterIssuer.type

### Description

-The type of the cluster issuer, must be ***http01***
+The type of the clusterIssuer. Only the `http01` challenge is supported for the KFDDistribution kind. See the `solvers` field for arbitrary configurations.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1235,7 +1423,7 @@ The type of the cluster issuer, must be ***http01***

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.ingress.certManager.overrides.tolerations

@@ -1250,13 +1438,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1274,7 +1462,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1308,7 +1496,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1323,13 +1511,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1347,7 +1535,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1372,7 +1560,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1387,7 +1575,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1402,13 +1590,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1426,7 +1614,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1452,11 +1640,11 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. 
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:--------------|
@@ -1474,25 +1662,42 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or **

| [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required |
| [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required |

+### Description
+
+Kubernetes TLS secret for the ingresses TLS certificate.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.ca

+### Description
+
+The Certificate Authority certificate file's content. You can use the `"{file://<path>}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.cert

### Description

-The certificate file content or you can use the file notation to get the content from a file
+The certificate file's content. You can use the `"{file://<path>}"` notation to get the content from a file.

## .spec.distribution.modules.ingress.nginx.tls.secret.key

+### Description
+
+The signing key file's content. You can use the `"{file://<path>}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.type

### Description

-The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***
+The type of the Ingress nginx controller, options are:
+- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.
+- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources; infrastructural ingresses will be created.
+- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.
+
+Default is `single`.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1510,6 +1715,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **

| [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the Ingress module.
+
## .spec.distribution.modules.ingress.overrides.ingresses

### Properties

@@ -1532,25 +1741,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **

### Description

-If true, the ingress will not have authentication
+If true, the ingress will not have authentication even if `.spec.distribution.modules.auth.provider.type` is SSO or Basic Auth.

## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host

### Description

-The host of the ingress
+Use this host for the ingress instead of the default one.

## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass

### Description

-The ingress class of the ingress
+Use this ingress class for the ingress instead of the default one.
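+
+For example, a hypothetical override of the Forecastle ingress (host and class are placeholders):
+
+```yaml
+overrides:
+  ingresses:
+    forecastle:
+      host: forecastle.internal.example.com
+      ingressClass: internal
+      disableAuth: false
+```
+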
## .spec.distribution.modules.ingress.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the ingress module
+Set to override the node selector used to place the pods of the Ingress module.

## .spec.distribution.modules.ingress.overrides.tolerations

@@ -1565,13 +1774,13 @@ The node selector to use to place the pods for the ingress module

### Description

-The tolerations that will be added to the pods for the ingress module
+Set to override the tolerations that will be added to the pods of the Ingress module.

## .spec.distribution.modules.ingress.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1589,7 +1798,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1617,6 +1826,10 @@ The value of the toleration

| [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional |
| [type](#specdistributionmodulesloggingtype) | `string` | Required |

+### Description
+
+Configuration for the Logging module.
+
## .spec.distribution.modules.logging.cerebro

### Properties

| Property | Type | Required |
|:-------------------------------------------------------------|:---------|:---------|
| [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional |

+### Description
+
+DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.
+
## .spec.distribution.modules.logging.cerebro.overrides

### Properties

@@ -1638,7 +1855,7 @@ The value of the toleration

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.logging.cerebro.overrides.tolerations

@@ -1653,13 +1870,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1677,7 +1894,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1707,55 +1924,55 @@ The value of the toleration

### Description

-when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.
+When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutput` objects that the Logging Operator expects to forward the logs collected by the pre-defined flows.

## .spec.distribution.modules.logging.customOutputs.audit

### Description

-This value defines where the output from Flow will be sent.
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. 
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`

## .spec.distribution.modules.logging.customOutputs.systemdEtcd

### Description

-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`

## .spec.distribution.modules.logging.loki

### Properties

@@ -1766,12 +1983,21 @@ This value defines where the output from Flow will be sent. Will be the `spec` s

| [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional |
| [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional |
| [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional |
+| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required |
+
+### Description
+
+Configuration for the Loki package.

## .spec.distribution.modules.logging.loki.backend

+### Description
+
+The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, while `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.
+
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1790,35 +2016,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s

| [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional |

+### Description
+
+Configuration for Loki's external storage backend.
+
## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId

### Description

-The access key id of the loki external endpoint
+The access key ID (username) for the external S3-compatible bucket.

## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName

### Description

-The bucket name of the loki external endpoint
+The bucket name of the external S3-compatible object storage.

## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint

### Description

-The endpoint of the loki external endpoint
+External S3-compatible endpoint for Loki's storage.

## .spec.distribution.modules.logging.loki.externalEndpoint.insecure

### Description

-If true, the loki external endpoint will be insecure
+If true, will use HTTP as the protocol instead of HTTPS.

## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey

### Description

-The secret access key of the loki external endpoint
+The secret access key (password) for the external S3-compatible bucket.
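+
+A hypothetical Loki configuration using an external S3-compatible bucket (values are placeholders; `tsdbStartDate` is described below):
+
+```yaml
+loki:
+  backend: externalEndpoint
+  externalEndpoint:
+    endpoint: s3.example.com
+    bucketName: loki
+    accessKeyId: myaccesskey
+    secretAccessKey: mysecretkey
+    insecure: false
+  tsdbStartDate: "2024-11-18"
+```
+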
## .spec.distribution.modules.logging.loki.resources

@@ -1842,13 +2072,13 @@ The secret access key of the loki external endpoint

### Description

-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.

## .spec.distribution.modules.logging.loki.resources.limits.memory

### Description

-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.

## .spec.distribution.modules.logging.loki.resources.requests

@@ -1863,13 +2093,23 @@ The memory limit for the opensearch pods

### Description

-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.

## .spec.distribution.modules.logging.loki.resources.requests.memory

### Description

-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
+
+## .spec.distribution.modules.logging.loki.tsdbStartDate

### Description

+Starting from KFD versions 1.28.4, 1.29.5 and 1.30.0, Loki will switch the time series database it uses to store the logs from BoltDB to TSDB, and the schema from v11 to v13.
+
+The value of this field determines the date when Loki will start writing using the new TSDB and schema v13, always at midnight UTC. The old BoltDB database and schema will be kept for reading purposes until they expire.
+
+Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`.

## .spec.distribution.modules.logging.minio

### Properties

@@ -1881,6 +2121,10 @@ The memory request for the opensearch pods

| [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional |

+### Description
+
+Configuration for Logging's MinIO deployment.
+
## .spec.distribution.modules.logging.minio.overrides

### Properties

@@ -1894,7 +2138,7 @@ The memory request for the opensearch pods

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.logging.minio.overrides.tolerations

@@ -1909,13 +2153,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.logging.minio.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1933,7 +2177,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1959,19 +2203,19 @@ The value of the toleration

### Description

-The password of the minio root user
+The password for the default MinIO root user.

## .spec.distribution.modules.logging.minio.rootUser.username

### Description

-The username of the minio root user
+The username for the default MinIO root user.

## .spec.distribution.modules.logging.minio.storageSize

### Description

-The PVC size for each minio disk, 6 disks total
+The PVC size for each MinIO disk, 6 disks total.
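+
+A hypothetical sketch for Logging's in-cluster MinIO (credentials and size are placeholders):
+
+```yaml
+minio:
+  storageSize: 20Gi
+  rootUser:
+    username: minio-logging-admin
+    password: supersecretpassword
+```
+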
## .spec.distribution.modules.logging.opensearch

@@ -1997,7 +2241,7 @@ The PVC size for each minio disk, 6 disks total

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.logging.opensearch.overrides.tolerations

@@ -2012,13 +2256,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2036,7 +2280,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2071,13 +2315,13 @@ The value of the toleration

### Description

-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.

## .spec.distribution.modules.logging.opensearch.resources.limits.memory

### Description

-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.

## .spec.distribution.modules.logging.opensearch.resources.requests

@@ -2092,29 +2336,29 @@ The memory limit for the opensearch pods

### Description

-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.

## .spec.distribution.modules.logging.opensearch.resources.requests.memory

### Description

-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.

## .spec.distribution.modules.logging.opensearch.storageSize

### Description

-The storage size for the opensearch pods
+The storage size for the OpenSearch volumes. Follows the Kubernetes resource storage request format. Default is `150Gi`.

## .spec.distribution.modules.logging.opensearch.type

### Description

-The type of the opensearch, must be ***single*** or ***triple***
+The type of OpenSearch deployment. One of: `single` for a single replica, or `triple` for an HA three-replica deployment.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2129,6 +2373,10 @@ The type of the opensearch, must be ***single*** or ***triple***

| [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional |

+### Description
+
+Configuration for the Logging Operator.
+
## .spec.distribution.modules.logging.operator.overrides

### Properties

@@ -2142,7 +2390,7 @@ The type of the opensearch, must be ***single*** or ***triple***

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.operator.overrides.tolerations

@@ -2157,13 +2405,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.logging.operator.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2181,7 +2429,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2204,13 +2452,17 @@ The value of the toleration

| [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.logging.overrides.ingresses

## .spec.distribution.modules.logging.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.

## .spec.distribution.modules.logging.overrides.tolerations

@@ -2225,13 +2477,13 @@ The node selector to use to place the pods for the security module

### Description

-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.logging.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2249,7 +2501,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2266,11 +2518,17 @@ The value of the toleration

### Description

-selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+Selects the logging stack. Options are:
+- `none`: will disable the centralized logging.
+- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage; you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:----------------|
@@ -2299,7 +2557,7 @@

### Description

-configuration for the Monitoring module components
+Configuration for the Monitoring module.

## .spec.distribution.modules.monitoring.alertmanager

### Properties

@@ -2315,19 +2573,19 @@

### Description

-The webhook url to send deadman switch monitoring, for example to use with healthchecks.io
+The webhook URL where to send the dead man's switch notifications, for example to use with healthchecks.io.

## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules

### Description

-If true, the default rules will be installed
+Set to false to avoid installing the Prometheus rules (alerts) included with the distribution.

## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl

### Description

-The slack webhook url to send alerts
+The Slack webhook URL where to send the infrastructural and workload alerts.

## .spec.distribution.modules.monitoring.blackboxExporter

### Properties

@@ -2350,7 +2608,7 @@ The slack webhook url to send alerts

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations

@@ -2365,13 +2623,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2389,7 +2647,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2433,7 +2691,7 @@ Notice that by default anonymous access is enabled.

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.grafana.overrides.tolerations

@@ -2448,13 +2706,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2472,7 +2730,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2518,7 +2776,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations

@@ -2533,13 +2791,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2557,7 +2815,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2581,15 +2839,19 @@ The value of the toleration

| [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional |
| [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional |

+### Description
+
+Configuration for the Mimir package.
+
## .spec.distribution.modules.monitoring.mimir.backend

### Description

-The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
+The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, while `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2608,35 +2870,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***

| [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional |

+### Description
+
+Configuration for Mimir's external storage backend.
+
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId

### Description

-The access key id of the external mimir backend
+The access key ID (username) for the external S3-compatible bucket.

## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName

### Description

-The bucket name of the external mimir backend
+The bucket name of the external S3-compatible object storage.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint

### Description

-The endpoint of the external mimir backend
+The external S3-compatible endpoint for Mimir's storage.

## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure

### Description

-If true, the external mimir backend will not use tls
+If true, will use HTTP as the protocol instead of HTTPS.

## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey

### Description

-The secret access key of the external mimir backend
+The secret access key (password) for the external S3-compatible bucket.

## .spec.distribution.modules.monitoring.mimir.overrides

@@ -2651,7 +2917,7 @@ The secret access key of the external mimir backend

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.mimir.overrides.tolerations

@@ -2666,13 +2932,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -2690,7 +2956,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -2707,7 +2973,7 @@ The value of the toleration

### Description

-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.

## .spec.distribution.modules.monitoring.minio

### Properties

@@ -2719,6 +2985,10 @@ The retention time for the mimir pods

| [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional |

+### Description
+
+Configuration for Monitoring's MinIO deployment.
+
## .spec.distribution.modules.monitoring.minio.overrides

### Properties

@@ -2732,7 +3002,7 @@ The retention time for the mimir pods

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.minio.overrides.tolerations

@@ -2747,13 +3017,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2771,7 +3041,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2797,19 +3067,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -2821,13 +3091,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -2842,13 +3116,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2866,7 +3140,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2921,13 +3195,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -2942,31 +3216,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. 
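+
+Putting the fields above together, a hypothetical resources block for the `k8s` Prometheus instance (values reuse the examples above):
+
+```yaml
+prometheus:
+  resources:
+    requests:
+      cpu: 500m
+      memory: 500M
+    limits:
+      cpu: 1000m
+      memory: 1G
+```
+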
## .spec.distribution.modules.monitoring.prometheus.retentionSize

### Description

-The retention size for the k8s Prometheus instance.
+The retention size for the `k8s` Prometheus instance.

## .spec.distribution.modules.monitoring.prometheus.retentionTime

### Description

-The retention time for the K8s Prometheus instance.
+The retention time for the `k8s` Prometheus instance.

## .spec.distribution.modules.monitoring.prometheus.storageSize

### Description

-The storage size for the k8s Prometheus instance.
+The storage size for the `k8s` Prometheus instance.

## .spec.distribution.modules.monitoring.prometheusAgent

@@ -3007,13 +3281,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver.

### Description

-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.

## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory

### Description

-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.

## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests

@@ -3028,28 +3302,30 @@ The memory limit for the opensearch pods

### Description

-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.

## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory

### Description

-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.

## .spec.distribution.modules.monitoring.type

### Description

-The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.
+The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.

- `none`: will disable the whole monitoring stack.
-- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
-- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
-- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
+- `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when there is a centralized (remote) Prometheus to ship the metrics to, instead of storing them locally in the cluster.
+- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+
+Default is `prometheus`.
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:------------------|
@@ -3079,7 +3355,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations

@@ -3094,13 +3370,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3118,7 +3394,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3142,6 +3418,10 @@ The value of the toleration
| [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional |
| [type](#specdistributionmodulesnetworkingtype) | `string` | Required |

+### Description
+
+Configuration for the Networking module.
+
## .spec.distribution.modules.networking.cilium

### Properties
@@ -3154,6 +3434,10 @@ The value of the toleration

## .spec.distribution.modules.networking.cilium.maskSize

+### Description
+
+The mask size to use for the Pods network on each node.
+
## .spec.distribution.modules.networking.cilium.overrides

### Properties
@@ -3167,7 +3451,7 @@ The value of the toleration

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.networking.cilium.overrides.tolerations

@@ -3182,13 +3466,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.networking.cilium.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3206,7 +3490,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3221,6 +3505,10 @@ The value of the toleration

## .spec.distribution.modules.networking.cilium.podCidr

+### Description
+
+Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set, the default is to use `.spec.kubernetes.podCidr`.
+ ### Constraints **pattern**: the string must match the following regular expression: @@ -3241,13 +3529,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.networking.overrides.ingresses ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations @@ -3262,13 +3554,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3286,7 +3578,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3320,7 +3612,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3335,13 +3627,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3359,7 +3651,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3376,11 +3668,11 @@ The value of the toleration ### Description -The type of networking to use, either ***none***, ***calico*** or ***cilium*** +The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3399,6 +3691,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. 
+
## .spec.distribution.modules.policy.gatekeeper

### Properties
@@ -3410,6 +3706,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium***
| [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required |
| [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional |

+### Description
+
+Configuration for the Gatekeeper package.
+
## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces

### Description
@@ -3420,11 +3720,11 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en

### Description

-The enforcement action to use for the gatekeeper module
+The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests, and `dryrun` won't give any feedback to the user but it will log the violations.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3436,7 +3736,7 @@ The enforcement action to use for the gatekeeper module

### Description

-If true, the default policies will be installed
+Set to `false` to avoid installing the default Gatekeeper policies (constraint templates and constraints) included with the distribution.

## .spec.distribution.modules.policy.gatekeeper.overrides

@@ -3451,7 +3751,7 @@ If true, the default policies will be installed

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations

@@ -3466,13 +3766,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3490,7 +3790,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3514,17 +3814,21 @@ The value of the toleration
| [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional |
| [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required |

+### Description
+
+Configuration for the Kyverno package.
+
## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces

### Description

-This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them.
+This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them.

## .spec.distribution.modules.policy.kyverno.installDefaultPolicies

### Description

-If true, the default policies will be installed
+Set to `false` to avoid installing the default Kyverno policies included with the distribution.
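+
+For illustration only, a hypothetical Kyverno configuration combining the fields documented in this section (the namespace name is a placeholder):
+
+```yaml
+policy:
+  type: kyverno
+  kyverno:
+    installDefaultPolicies: true
+    validationFailureAction: Audit
+    additionalExcludedNamespaces: ["my-namespace"]
+```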
## .spec.distribution.modules.policy.kyverno.overrides @@ -3539,7 +3843,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -3554,13 +3858,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3578,7 +3882,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3595,16 +3899,16 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| -|`"audit"` | -|`"enforce"`| +|`"Audit"` | +|`"Enforce"`| ## .spec.distribution.modules.policy.overrides @@ -3616,13 +3920,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -3637,13 +3945,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.policy.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3661,7 +3969,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3678,11 +3986,13 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -3701,6 +4011,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -3711,6 +4025,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -3724,7 +4042,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -3739,13 +4057,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3763,7 +4081,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3789,19 +4107,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. 
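+
+A minimal sketch of Tracing's in-cluster MinIO configuration (all values are placeholders, not defaults):
+
+```yaml
+tracing:
+  minio:
+    storageSize: "20Gi"
+    rootUser:
+      username: myMinioUsername
+      password: supersecretpassword
+```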
## .spec.distribution.modules.tracing.overrides @@ -3813,13 +4131,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -3834,13 +4156,13 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3858,7 +4180,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3882,15 +4204,19 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3909,35 +4235,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +The external S3-compatible endpoint for Tempo's storage. 
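+
+Putting the `externalEndpoint` fields together, a hypothetical external backend configuration could look like the following sketch (endpoint, bucket and credentials are placeholders):
+
+```yaml
+tracing:
+  type: tempo
+  tempo:
+    backend: externalEndpoint
+    externalEndpoint:
+      endpoint: s3.example.com
+      bucketName: tempo-traces
+      accessKeyId: myAccessKeyId
+      secretAccessKey: mySecretAccessKey
+      insecure: false
+```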
## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -3952,7 +4282,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -3967,13 +4297,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3991,7 +4321,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4008,17 +4338,19 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -4027,6 +4359,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. 
+
### Constraints

**minimum length**: the minimum number of characters for this string is: `1`

@@ -4053,14 +4389,15 @@ The type of tracing to use, either ***none*** or ***tempo***

### Properties

-| Property | Type | Required |
-|:-----------------------------------------------|:---------|:---------|
-| [chart](#specpluginshelmreleaseschart) | `string` | Required |
-| [name](#specpluginshelmreleasesname) | `string` | Required |
-| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
-| [set](#specpluginshelmreleasesset) | `array` | Optional |
-| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
-| [version](#specpluginshelmreleasesversion) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------|:----------|:---------|
+| [chart](#specpluginshelmreleaseschart) | `string` | Required |
+| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional |
+| [name](#specpluginshelmreleasesname) | `string` | Required |
+| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
+| [set](#specpluginshelmreleasesset) | `array` | Optional |
+| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
+| [version](#specpluginshelmreleasesversion) | `string` | Optional |

## .spec.plugins.helm.releases.chart

### Description

The chart of the release

+## .spec.plugins.helm.releases.disableValidationOnInstall
+
+### Description
+
+Disable running `helm diff` validation when installing the plugin; it will still be done when upgrading.
+
## .spec.plugins.helm.releases.name

### Description

diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md
index b30ecfb4e..9bb0ae9d0 100644
--- a/docs/schemas/onpremises-kfd-v1alpha2.md
+++ b/docs/schemas/onpremises-kfd-v1alpha2.md
@@ -2,8 +2,14 @@

This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises.

-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+
+```bash
+furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.

## Properties

| Property | Type | Required |
@@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
| [metadata](#metadata) | `object` | Required |
| [spec](#spec) | `object` | Required |

+### Description
+
+A KFD Cluster deployed on top of a set of existing VMs.
+
## .apiVersion

### Constraints

@@ -29,7 +39,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------|
@@ -45,6 +55,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio

## .metadata.name

+### Description
+
+The name of the cluster. It will also be used as a prefix for all the other resources created.
+ ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -76,19 +90,30 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Properties -| Property | Type | Required | -|:----------------------------------------------------------------|:---------|:---------| -| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | -| [provider](#specdistributioncommonprovider) | `object` | Optional | -| [registry](#specdistributioncommonregistry) | `string` | Optional | -| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | -| [tolerations](#specdistributioncommontolerations) | `array` | Optional | +| Property | Type | Required | +|:------------------------------------------------------------------------|:----------|:---------| +| [networkPoliciesEnabled](#specdistributioncommonnetworkpoliciesenabled) | `boolean` | Optional | +| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | +| [provider](#specdistributioncommonprovider) | `object` | Optional | +| [registry](#specdistributioncommonregistry) | `string` | Optional | +| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | +| [tolerations](#specdistributioncommontolerations) | `array` | Optional | + +### Description + +Common configuration for all the distribution modules. + +## .spec.distribution.common.networkPoliciesEnabled + +### Description + +EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided for core modules. ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -102,19 +127,21 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). + +NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. ## .spec.distribution.common.relativeVendorPath ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -129,13 +156,19 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. 
Example:
+
+```yaml
+- effect: NoSchedule
+  key: node.kubernetes.io/role
+  value: infra
+```

## .spec.distribution.common.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -153,7 +186,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -200,7 +233,7 @@ The value of the toleration

### Description

The behavior of the configmap

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:----------|
@@ -408,7 +441,7 @@ The behavior of the secret

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:----------|
@@ -515,11 +548,15 @@ The type of the secret
| [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional |
| [provider](#specdistributionmodulesauthprovider) | `object` | Required |

+### Description
+
+Configuration for the Auth module.
+
## .spec.distribution.modules.auth.baseDomain

### Description

-The base domain for the auth module
+The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class.

## .spec.distribution.modules.auth.dex

### Properties

| Property | Type | Required |
|:------------------------------------------------------------------------------------|:---------|:---------|
| [additionalStaticClients](#specdistributionmodulesauthdexadditionalstaticclients) | `array` | Optional |
| [connectors](#specdistributionmodulesauthdexconnectors) | `array` | Required |
| [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional |
| [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional |

+### Description
+
+Configuration for the Dex package.
+
## .spec.distribution.modules.auth.dex.additionalStaticClients

### Description

-The additional static clients for dex
+Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:
+
+```yaml
+additionalStaticClients:
+  - id: my-custom-client
+    name: "A custom additional static client"
+    redirectURIs:
+      - "https://myapp.tld/redirect"
+      - "https://alias.tld/oidc-callback"
+    secret: supersecretpassword
+```
+Reference: https://dexidp.io/docs/connectors/local/

## .spec.distribution.modules.auth.dex.connectors

### Description

-The connectors for dex
+A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/

## .spec.distribution.modules.auth.dex.expiry

### Properties

| Property | Type | Required |
|:-----------------------------------------------------------------|:---------|:---------|
| [idTokens](#specdistributionmodulesauthdexexpiryidtokens) | `string` | Optional |
| [signingKeys](#specdistributionmodulesauthdexexpirysigningkeys) | `string` | Optional |

## .spec.distribution.modules.auth.dex.expiry.idTokens

### Description

Dex ID tokens expiration time duration (default 24h).

## .spec.distribution.modules.auth.dex.expiry.signingKeys

### Description

Dex signing key expiration time duration (default 6h).

@@ -578,7 +630,7 @@ Dex signing key expiration time duration (default 6h).

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.auth.dex.overrides.tolerations

@@ -593,13 +645,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.auth.dex.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -617,7 +669,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -650,55 +702,55 @@ The value of the toleration

### Description

-The client ID for oidc kubernetes auth
+The client ID that the Kubernetes API will use to authenticate against the OIDC provider (Dex).

## .spec.distribution.modules.auth.oidcKubernetesAuth.clientSecret

### Description

-The client secret for oidc kubernetes auth
+The client secret that the Kubernetes API will use to authenticate against the OIDC provider (Dex).

## .spec.distribution.modules.auth.oidcKubernetesAuth.emailClaim

### Description

-The email claim for oidc kubernetes auth
+DEPRECATED. Defaults to `email`.

## .spec.distribution.modules.auth.oidcKubernetesAuth.enabled

### Description

-If true, oidc kubernetes auth will be enabled
+If true, the components needed for interacting with the Kubernetes API with OIDC authentication (Gangplank, Dex) will be deployed and configured.

## .spec.distribution.modules.auth.oidcKubernetesAuth.namespace

### Description

-The namespace to set in the context of the kubeconfig file
+The namespace to set in the context of the kubeconfig file generated by Gangplank. Defaults to `default`.

## .spec.distribution.modules.auth.oidcKubernetesAuth.removeCAFromKubeconfig

### Description

-Set to true to remove the CA from the kubeconfig file
+Set to true to remove the CA from the kubeconfig file generated by Gangplank.

## .spec.distribution.modules.auth.oidcKubernetesAuth.scopes

### Description

-The scopes for oidc kubernetes auth
+Used to specify the scope of the requested OAuth authorization by Gangplank. Defaults to: `["openid", "profile", "email", "offline_access", "groups"]`

## .spec.distribution.modules.auth.oidcKubernetesAuth.sessionSecurityKey

### Description

-The session security key for oidc kubernetes auth
+The key to use for the sessions in Gangplank. Must be different between different instances of Gangplank.

## .spec.distribution.modules.auth.oidcKubernetesAuth.usernameClaim

### Description

-The username claim for oidc kubernetes auth
+The JWT claim to use as the username. This is used in Gangplank's UI. This is combined with the clusterName for the user portion of the kubeconfig. Defaults to `nickname`.

## .spec.distribution.modules.auth.overrides

### Properties

| Property | Type | Required |
|:-------------------------------------------------------------------|:---------|:---------|
| [ingresses](#specdistributionmodulesauthoverridesingresses) | `object` | Optional |
| [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the Auth module.
+
## .spec.distribution.modules.auth.overrides.ingresses

+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------|:---------|:---------|
+| [dex](#specdistributionmodulesauthoverridesingressesdex) | `object` | Optional |
+| [gangplank](#specdistributionmodulesauthoverridesingressesgangplank) | `object` | Optional |
+
+### Description
+
+Override the definition of the Auth module ingresses.
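+
+For example, a hedged sketch overriding both ingresses at once (hostnames are placeholders):
+
+```yaml
+auth:
+  overrides:
+    ingresses:
+      dex:
+        host: dex.mydomain.tld
+        ingressClass: external
+      gangplank:
+        host: gangplank.mydomain.tld
+        ingressClass: external
+```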
+ +## .spec.distribution.modules.auth.overrides.ingresses.dex + +### Properties + +| Property | Type | Required | +|:------------------------------------------------------------------------------|:---------|:---------| +| [host](#specdistributionmodulesauthoverridesingressesdexhost) | `string` | Required | +| [ingressClass](#specdistributionmodulesauthoverridesingressesdexingressclass) | `string` | Required | + +## .spec.distribution.modules.auth.overrides.ingresses.dex.host + +### Description + +Use this host for the ingress instead of the default one. + +## .spec.distribution.modules.auth.overrides.ingresses.dex.ingressClass + +### Description + +Use this ingress class for the ingress instead of the default one. + +## .spec.distribution.modules.auth.overrides.ingresses.gangplank + +### Properties + +| Property | Type | Required | +|:------------------------------------------------------------------------------------|:---------|:---------| +| [host](#specdistributionmodulesauthoverridesingressesgangplankhost) | `string` | Required | +| [ingressClass](#specdistributionmodulesauthoverridesingressesgangplankingressclass) | `string` | Required | + +## .spec.distribution.modules.auth.overrides.ingresses.gangplank.host + +### Description + +Use this host for the ingress instead of the default one. + +## .spec.distribution.modules.auth.overrides.ingresses.gangplank.ingressClass + +### Description + +Use this ingress class for the ingress instead of the default one. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations @@ -731,13 +840,13 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -755,7 +864,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -851,7 +960,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -865,7 +974,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -955,27 +1064,36 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. 
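+
+A minimal sketch of enabling the Basic Auth provider (credentials are placeholders):
+
+```yaml
+auth:
+  provider:
+    type: basicAuth
+    basicAuth:
+      username: admin
+      password: supersecretpassword
+```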
+ ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -993,6 +1111,10 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. + ## .spec.distribution.modules.dr.overrides ### Properties @@ -1003,13 +1125,17 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the tracing module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations @@ -1024,13 +1150,13 @@ The node selector to use to place the pods for the tracing module ### Description -The tolerations that will be added to the pods for the policy module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1048,7 +1174,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1065,11 +1191,13 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***on-premises*** +The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. + +Default is `none`. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1080,22 +1208,27 @@ The type of the DR, must be ***none*** or ***on-premises*** ### Properties -| Property | Type | Required | -|:---------------------------------------------------------------------|:---------|:---------| -| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional | -| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional | -| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | -| [retentionTime](#specdistributionmodulesdrveleroretentiontime) | `string` | Optional | +| Property | Type | Required | +|:-------------------------------------------------------------------------|:---------|:---------| +| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional | +| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional | +| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | +| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | +| [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional | + +### Description + +Configuration for the Velero package. ## .spec.distribution.modules.dr.velero.backend ### Description -The backend for velero +The storage backend type for Velero. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1114,35 +1247,39 @@ The backend for velero | [insecure](#specdistributionmodulesdrveleroexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesdrveleroexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Velero's external storage backend. + ## .spec.distribution.modules.dr.velero.externalEndpoint.accessKeyId ### Description -The access key id for velero backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.dr.velero.externalEndpoint.bucketName ### Description -The bucket name for velero backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.dr.velero.externalEndpoint.endpoint ### Description -The endpoint for velero +External S3-compatible endpoint for Velero's storage. ## .spec.distribution.modules.dr.velero.externalEndpoint.insecure ### Description -If true, the endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.dr.velero.externalEndpoint.secretAccessKey ### Description -The secret access key for velero backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.dr.velero.overrides @@ -1157,7 +1294,7 @@ The secret access key for velero backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.dr.velero.overrides.tolerations

@@ -1172,13 +1309,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.dr.velero.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -1196,7 +1333,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -1209,11 +1346,112 @@ The key of the toleration

The value of the toleration

-## .spec.distribution.modules.dr.velero.retentionTime
+## .spec.distribution.modules.dr.velero.schedules
+
+### Properties
+
+| Property | Type | Required |
+|:--------------------------------------------------------------------|:----------|:---------|
+| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional |
+| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |
+
+### Description
+
+Configuration for Velero's backup schedules.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------------|:---------|:---------|
+| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional |
+| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional |

### Description

-The retention time for velero
+Configuration for Velero schedules.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------------------|:----------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional |
+| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's full backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule
+
+### Description
+
+The cron expression for the `full` backup schedule (default `0 1 * * *`).
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData
+
+### Description
+
+EXPERIMENTAL (if you do more than one backup, the backups after the first are not automatically restorable; see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days).
Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests
+
+### Properties
+
+| Property | Type | Required |
+|:----------------------------------------------------------------------------------|:---------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's manifests backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule
+
+### Description
+
+The cron expression for the `manifests` backup schedule (default `*/15 * * * *`).
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL.
+
+## .spec.distribution.modules.dr.velero.schedules.install
+
+### Description
+
+Whether or not to install the default `manifests` and `full` backup schedules. Default is `true`.
+
+## .spec.distribution.modules.dr.velero.snapshotController
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------|:----------|:---------|
+| [install](#specdistributionmodulesdrvelerosnapshotcontrollerinstall) | `boolean` | Optional |
+
+### Description
+
+Configuration for the additional snapshotController component installation.
+
+## .spec.distribution.modules.dr.velero.snapshotController.install
+
+### Description
+
+Whether or not to install the snapshotController component in the cluster. Before enabling this field, check that your CSI driver does not already include a built-in snapshotController.

## .spec.distribution.modules.ingress

### Properties

@@ -1232,7 +1470,7 @@

### Description

-the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone
+The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class.

## .spec.distribution.modules.ingress.certManager

### Properties

@@ -1243,6 +1481,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati
| [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required |
| [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional |

+### Description
+
+Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`: cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.
+
## .spec.distribution.modules.ingress.certManager.clusterIssuer

### Properties

@@ -1254,33 +1496,37 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati
| [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional |
| [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional |

+### Description
+
+Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.
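+
+An illustrative clusterIssuer sketch combining the fields documented next (name and email are placeholders):
+
+```yaml
+ingress:
+  certManager:
+    clusterIssuer:
+      name: letsencrypt-fury
+      email: admin@example.dev
+      type: http01
+```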
+ ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +The name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***http01*** +The type of the clusterIssuer. Only `http01` challenge is supported for on-premises clusters. See solvers for arbitrary configurations. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1299,7 +1545,7 @@ The type of the cluster issuer, must be ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1314,13 +1560,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1338,7 +1584,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1372,7 +1618,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1387,13 +1633,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1411,7 +1657,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1436,7 +1682,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1451,7 +1697,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1466,13 +1712,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1490,7 +1736,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1516,11 +1762,11 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1538,25 +1784,42 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. 
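+
+For example, a hypothetical secret-based TLS configuration using the file notation (the paths are illustrative):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      ingress:
+        nginx:
+          tls:
+            provider: secret
+            secret:
+              ca: "{file://certs/ca.crt}"
+              cert: "{file://certs/server.crt}"
+              key: "{file://certs/server.key}"
+```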
+
## .spec.distribution.modules.ingress.nginx.type

### Description

-The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***
+The type of the Ingress nginx controller, options are:
+- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.
+- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources; infrastructural ingresses will be created.
+- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.
+
+Default is `single`.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -1574,6 +1837,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **

| [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the Ingress module.
+
## .spec.distribution.modules.ingress.overrides.ingresses

### Properties

@@ -1596,25 +1863,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **

### Description

-If true, the ingress will not have authentication
+If true, the ingress will not have authentication even if `.spec.distribution.modules.auth.provider.type` is SSO or Basic Auth.

## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host

### Description

-The host of the ingress
+Use this host for the ingress instead of the default one.

## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass

### Description

-The ingress class of the ingress
+Use this ingress class for the ingress instead of the default one.

## .spec.distribution.modules.ingress.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the ingress module
+Set to override the node selector used to place the pods of the Ingress module.

## .spec.distribution.modules.ingress.overrides.tolerations

@@ -1629,13 +1896,13 @@ The node selector to use to place the pods for the ingress module

### Description

-The tolerations that will be added to the pods for the ingress module
+Set to override the tolerations that will be added to the pods of the Ingress module.

## .spec.distribution.modules.ingress.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -1653,7 +1920,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -1683,6 +1950,10 @@ The value of the toleration

| [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional |
| [type](#specdistributionmodulesloggingtype) | `string` | Required |

+### Description
+
+Configuration for the Logging module.
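+
+For example, a minimal sketch of the Logging module configuration (the chosen type and backend are illustrative, see the fields below for the details):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      logging:
+        type: loki
+        loki:
+          backend: minio
+          tsdbStartDate: "2024-11-18"
+```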
+ ## .spec.distribution.modules.logging.cerebro ### Properties @@ -1691,6 +1962,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -1704,7 +1979,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -1719,13 +1994,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1743,7 +2018,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1773,55 +2048,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. 
+This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -1832,12 +2107,21 @@ This value defines where the output from Flow will be sent. 
Will be the `spec` s | [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional | | [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional | | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | +| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | + +### Description + +Configuration for the Loki package. ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1856,35 +2140,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. + ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -1908,13 +2196,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -1929,13 +2217,23 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. + +## .spec.distribution.modules.logging.loki.tsdbStartDate + +### Description + +Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs. + +The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. 
The old BoltDB and schema will be kept until they expire for reading purposes. + +Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`. ## .spec.distribution.modules.logging.minio @@ -1947,6 +2245,10 @@ The memory request for the prometheus pods | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -1960,7 +2262,7 @@ The memory request for the prometheus pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -1975,13 +2277,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1999,7 +2301,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2025,19 +2327,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2063,7 +2365,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2078,13 +2380,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2102,7 +2404,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2137,13 +2439,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. 
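+
+As an illustrative sketch, the `resources` pattern shared by these packages combines requests and limits (the values below are just the examples from this page):
+
+```yaml
+resources:
+  requests:
+    cpu: 500m
+    memory: 500M
+  limits:
+    cpu: 1000m
+    memory: 1G
+```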
## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2158,29 +2460,29 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2195,6 +2497,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. + ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2208,7 +2514,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2223,13 +2529,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2247,7 +2553,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2270,13 +2576,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the tracing module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.logging.overrides.tolerations

@@ -2291,13 +2601,13 @@ The node selector to use to place the pods for the tracing module

### Description

-The tolerations that will be added to the pods for the policy module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.logging.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -2315,7 +2625,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -2332,11 +2642,17 @@ The value of the toleration

### Description

-selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+Selects the logging stack. Options are:
+- `none`: will disable the centralized logging.
+- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage; you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:----------------|

@@ -2365,7 +2681,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C

### Description

-configuration for the Monitoring module components
+Configuration for the Monitoring module.

## .spec.distribution.modules.monitoring.alertmanager

### Properties

@@ -2381,19 +2697,19 @@ configuration for the Monitoring module components

### Description

-The webhook url to send deadman switch monitoring, for example to use with healthchecks.io
+The webhook URL where to send the dead man's switch monitoring alerts, for example to use with healthchecks.io.

## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules

### Description

-If true, the default rules will be installed
+Set to `false` to avoid installing the Prometheus rules (alerts) included with the distribution.

## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl

### Description

-The slack webhook url to send alerts
+The Slack webhook URL where to send the infrastructural and workload alerts.

## .spec.distribution.modules.monitoring.blackboxExporter

### Properties

@@ -2416,7 +2732,7 @@ The slack webhook url to send alerts

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2431,13 +2747,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2455,7 +2771,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2499,7 +2815,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -2514,13 +2830,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2538,7 +2854,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2584,7 +2900,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -2599,13 +2915,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2623,7 +2939,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2647,15 +2963,19 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. 
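+
+For example, a hypothetical Mimir configuration using an external S3-compatible object storage (the endpoint and credentials are placeholders):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      monitoring:
+        type: mimir
+        mimir:
+          backend: externalEndpoint
+          externalEndpoint:
+            endpoint: s3.example.dev
+            bucketName: mimir
+            accessKeyId: exampleAccessKey
+            secretAccessKey: exampleSecretKey
+            insecure: false
+          retentionTime: 30d
+```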
+
## .spec.distribution.modules.monitoring.mimir.backend

### Description

-The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
+The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -2674,35 +2994,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
| [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional |

+### Description
+
+Configuration for Mimir's external storage backend.
+
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId

### Description

-The access key id of the external mimir backend
+The access key ID (username) for the external S3-compatible bucket.

## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName

### Description

-The bucket name of the external mimir backend
+The bucket name of the external S3-compatible object storage.

## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint

### Description

-The endpoint of the external mimir backend
+The external S3-compatible endpoint for Mimir's storage.

## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure

### Description

-If true, the external mimir backend will not use tls
+If true, will use HTTP as protocol instead of HTTPS.

## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey

### Description

-The secret access key of the external mimir backend
+The secret access key (password) for the external S3-compatible bucket.

## .spec.distribution.modules.monitoring.mimir.overrides

@@ -2717,7 +3041,7 @@ The secret access key of the external mimir backend

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.mimir.overrides.tolerations

@@ -2732,13 +3056,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -2756,7 +3080,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -2773,7 +3097,7 @@ The value of the toleration

### Description

-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.
## .spec.distribution.modules.monitoring.minio @@ -2785,6 +3109,10 @@ The retention time for the mimir pods | [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional | +### Description + +Configuration for Monitoring's MinIO deployment. + ## .spec.distribution.modules.monitoring.minio.overrides ### Properties @@ -2798,7 +3126,7 @@ The retention time for the mimir pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations @@ -2813,13 +3141,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2837,7 +3165,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2863,19 +3191,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -2887,13 +3215,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the tracing module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -2908,13 +3240,13 @@ The node selector to use to place the pods for the tracing module ### Description -The tolerations that will be added to the pods for the policy module +Set to override the tolerations that will be added to the pods of the module. 
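+
+As a sketch, a hypothetical module-level override for Monitoring that combines an ingress override with a scheduling override (the `grafana` ingress key, host, and node label are assumptions for illustration):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      monitoring:
+        overrides:
+          ingresses:
+            grafana:
+              host: grafana.internal.example.dev
+              ingressClass: internal
+          nodeSelector:
+            node.kubernetes.io/role: infra
+```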
## .spec.distribution.modules.monitoring.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2932,7 +3264,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2987,13 +3319,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3008,31 +3340,31 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the k8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3073,13 +3405,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3094,28 +3426,30 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. -- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. 
-- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. +- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. +- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3145,7 +3479,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3160,13 +3494,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3184,7 +3518,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3208,6 +3542,10 @@ The value of the toleration | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | | [type](#specdistributionmodulesnetworkingtype) | `string` | Required | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.cilium ### Properties @@ -3222,7 +3560,7 @@ The value of the toleration ### Description -The mask size to use for the cilium pods +The mask size to use for the Pods network on each node. ## .spec.distribution.modules.networking.cilium.overrides @@ -3237,7 +3575,7 @@ The mask size to use for the cilium pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.networking.cilium.overrides.tolerations

@@ -3252,13 +3590,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.networking.cilium.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -3276,7 +3614,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -3293,7 +3631,7 @@ The value of the toleration

### Description

-The pod cidr to use for the cilium pods
+Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set, the default is to use `.spec.kubernetes.podCidr`.

### Constraints

@@ -3315,13 +3653,17 @@ The pod cidr to use for the cilium pods
| [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.networking.overrides.ingresses

## .spec.distribution.modules.networking.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the tracing module
+Set to override the node selector used to place the pods of the module.

## .spec.distribution.modules.networking.overrides.tolerations

@@ -3336,13 +3678,13 @@ The node selector to use to place the pods for the tracing module

### Description

-The tolerations that will be added to the pods for the policy module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.networking.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -3360,7 +3702,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -3394,7 +3736,7 @@ The value of the toleration

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations

@@ -3409,13 +3751,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.
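+
+For example, a minimal sketch of the Networking module using Cilium (the CIDR and mask size values are illustrative):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      networking:
+        type: cilium
+        cilium:
+          maskSize: "24"
+          podCidr: 172.16.0.0/16
+```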
## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3433,7 +3775,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3450,11 +3792,11 @@ The value of the toleration ### Description -The type of networking to use, either ***calico*** or ***cilium*** +The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3472,6 +3814,10 @@ The type of networking to use, either ***calico*** or ***cilium*** | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3483,6 +3829,10 @@ The type of networking to use, either ***calico*** or ***cilium*** | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3493,11 +3843,11 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3509,7 +3859,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. ## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3524,7 +3874,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations @@ -3539,13 +3889,13 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the minio module +Set to override the tolerations that will be added to the pods of the package. 
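+
+For example, a hypothetical Policy module configuration using Gatekeeper (the excluded namespace is illustrative; Kyverno is configured analogously under the `kyverno` key):
+
+```yaml
+spec:
+  distribution:
+    modules:
+      policy:
+        type: gatekeeper
+        gatekeeper:
+          enforcementAction: deny
+          installDefaultPolicies: true
+          additionalExcludedNamespaces:
+            - my-namespace
+```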
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -3563,7 +3913,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -3587,17 +3937,21 @@ The value of the toleration
| [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional |
| [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required |

+### Description
+
+Configuration for the Kyverno package.
+
## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces

### Description

-This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them.
+This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them.

## .spec.distribution.modules.policy.kyverno.installDefaultPolicies

### Description

-If true, the default policies will be installed
+Set to `false` to avoid installing the default Kyverno policies included with the distribution.

## .spec.distribution.modules.policy.kyverno.overrides

### Properties

@@ -3612,7 +3966,7 @@ If true, the default policies will be installed

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.policy.kyverno.overrides.tolerations

@@ -3627,13 +3981,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|

@@ -3651,7 +4005,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|

@@ -3668,16 +4022,16 @@ The value of the toleration

### Description

-The validation failure action to use for the kyverno module
+The validation failure action to use for the policies. `Enforce` will block requests that do not comply with the policies, while `Audit` will only log the violations without blocking them.
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| -|`"audit"` | -|`"enforce"`| +|`"Audit"` | +|`"Enforce"`| ## .spec.distribution.modules.policy.overrides @@ -3689,13 +4043,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the tracing module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -3710,13 +4068,13 @@ The node selector to use to place the pods for the tracing module ### Description -The tolerations that will be added to the pods for the policy module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3734,7 +4092,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3751,11 +4109,13 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -3774,6 +4134,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -3784,6 +4148,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -3797,7 +4165,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.minio.overrides.tolerations

@@ -3812,13 +4180,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3836,7 +4204,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3862,19 +4230,19 @@ The value of the toleration

### Description

-The password for the minio root user
+The password for the default MinIO root user.

## .spec.distribution.modules.tracing.minio.rootUser.username

### Description

-The username for the minio root user
+The username for the default MinIO root user.

## .spec.distribution.modules.tracing.minio.storageSize

### Description

-The storage size for the minio pods
+The PVC size for each MinIO disk (6 disks in total).

## .spec.distribution.modules.tracing.overrides

@@ -3886,13 +4254,17 @@ The storage size for the minio pods

| [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.tracing.overrides.ingresses

## .spec.distribution.modules.tracing.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the tracing module
+Set to override the node selector used to place the pods of the module.

## .spec.distribution.modules.tracing.overrides.tolerations

@@ -3907,13 +4279,13 @@ The node selector to use to place the pods for the tracing module

### Description

-The tolerations that will be added to the pods for the policy module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.tracing.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3931,7 +4303,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -3955,15 +4327,19 @@ The value of the toleration

| [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional |
| [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional |

+### Description
+
+Configuration for the Tempo package.
+
## .spec.distribution.modules.tracing.tempo.backend

### Description

-The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
+The storage backend type for Tempo.
`minio` will use an in-cluster MinIO deployment for object storage, while `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -3982,35 +4358,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***

| [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional |

+### Description
+
+Configuration for Tempo's external storage backend.
+
## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId

### Description

-The access key id of the external tempo backend
+The access key ID (username) for the external S3-compatible bucket.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName

### Description

-The bucket name of the external tempo backend
+The bucket name of the external S3-compatible object storage.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint

### Description

-The endpoint of the external tempo backend
+The external S3-compatible endpoint for Tempo's storage.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure

### Description

-If true, the external tempo backend will not use tls
+If true, HTTP will be used as the protocol instead of HTTPS.

## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey

### Description

-The secret access key of the external tempo backend
+The secret access key (password) for the external S3-compatible bucket.

## .spec.distribution.modules.tracing.tempo.overrides

@@ -4025,7 +4405,7 @@ The secret access key of the external tempo backend

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.tracing.tempo.overrides.tolerations

@@ -4040,13 +4420,13 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the minio module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -4064,7 +4444,7 @@ The key of the toleration

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:---------|
@@ -4081,17 +4461,19 @@ The value of the toleration

### Description

-The retention time for the tempo pods
+The retention time for the traces stored in Tempo.

## .spec.distribution.modules.tracing.type

### Description

-The type of tracing to use, either ***none*** or ***tempo***
+The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.
+
+Default is `tempo`.
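+
+As an illustrative sketch, a hypothetical Tracing module configuration using an external S3-compatible backend (the endpoint, bucket, credentials, and retention value are placeholders):
+
+```yaml
+tracing:
+  type: tempo
+  tempo:
+    backend: externalEndpoint
+    externalEndpoint:
+      endpoint: s3.example.dev
+      bucketName: tempo-traces
+      accessKeyId: exampleAccessKey
+      secretAccessKey: exampleSecretKey
+      insecure: false
+    retentionTime: 720h
+```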
### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:--------|
@@ -4100,6 +4482,10 @@ The type of tracing to use, either ***none*** or ***tempo***

## .spec.distributionVersion

+### Description
+
+Defines which KFD version will be installed and, as a consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.
+
### Constraints

**minimum length**: the minimum number of characters for this string is: `1`

@@ -4123,6 +4509,10 @@ The type of tracing to use, either ***none*** or ***tempo***

| [ssh](#speckubernetesssh) | `object` | Required |
| [svcCidr](#speckubernetessvccidr) | `string` | Required |

+### Description
+
+Defines the Kubernetes components configuration and the values needed for the Kubernetes phase of furyctl.
+
## .spec.kubernetes.advanced

### Properties

@@ -4149,11 +4539,15 @@ The type of tracing to use, either ***none*** or ***tempo***

| [runcChecksum](#speckubernetesadvancedairgapruncchecksum) | `string` | Optional |
| [runcDownloadUrl](#speckubernetesadvancedairgapruncdownloadurl) | `string` | Optional |

+### Description
+
+Advanced configuration for air-gapped installations. Allows setting custom URLs where to download the binary dependencies from and custom `.deb` and `.rpm` package repositories.
+
## .spec.kubernetes.advanced.airGap.containerdDownloadUrl

### Description

-The containerd download url
+URL where to download the `.tar.gz` with containerd from. The `.tar.gz` should be the same as the one downloaded from containerd's GitHub releases page.

## .spec.kubernetes.advanced.airGap.dependenciesOverride

@@ -4179,25 +4573,25 @@ The containerd download url

### Description

-The gpg key of the apt dependency
+URL where to download the GPG key of the Apt repository. Example: `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key`

## .spec.kubernetes.advanced.airGap.dependenciesOverride.apt.gpg_key_id

### Description

-The gpg key id of the apt dependency
+The GPG key ID of the Apt repository. Example: `36A1D7869245C8950F966E92D8576A8BA88D21E9`

## .spec.kubernetes.advanced.airGap.dependenciesOverride.apt.name

### Description

-The name of the apt dependency
+An indicative name for the Apt repository. Example: `k8s-1.29`

## .spec.kubernetes.advanced.airGap.dependenciesOverride.apt.repo

### Description

-The repo of the apt dependency
+A source string for the new Apt repository. Example: `deb https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /`

## .spec.kubernetes.advanced.airGap.dependenciesOverride.yum

@@ -4215,49 +4609,49 @@ The repo of the apt dependency

### Description

-The gpg key of the yum dependency
+URL where to download the ASCII-armored GPG key of the Yum repository. Example: `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key`

## .spec.kubernetes.advanced.airGap.dependenciesOverride.yum.gpg_key_check

### Description

-If true, the gpg key check will be enabled
+If true, the GPG signature check on the packages will be enabled.

## .spec.kubernetes.advanced.airGap.dependenciesOverride.yum.name

### Description

-The name of the yum dependency
+An indicative name for the Yum repository. Example: `k8s-1.29`

## .spec.kubernetes.advanced.airGap.dependenciesOverride.yum.repo

### Description

-The repo of the yum dependency
+URL to the directory where the Yum repository's `repodata` directory lives.
Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/`

## .spec.kubernetes.advanced.airGap.dependenciesOverride.yum.repo_gpg_check

### Description

-If true, the repo gpg check will be enabled
+If true, the GPG signature check on the `repodata` will be enabled.

## .spec.kubernetes.advanced.airGap.etcdDownloadUrl

### Description

-The etcd download url
+URL to the path where the etcd `tar.gz` archives are available. etcd will be downloaded from `<url>/etcd-<version>-linux-<arch>.tar.gz`

## .spec.kubernetes.advanced.airGap.runcChecksum

### Description

-The runc checksum
+Checksum for the runc binary.

## .spec.kubernetes.advanced.airGap.runcDownloadUrl

### Description

-The runc download url
+URL where to download the runc binary from.

## .spec.kubernetes.advanced.cloud

@@ -4272,13 +4666,13 @@ The runc download url

### Description

-The cloud config to use
+Sets the cloud config for the Kubelet.

## .spec.kubernetes.advanced.cloud.provider

### Description

-The cloud provider to use
+Sets the cloud provider for the Kubelet.

## .spec.kubernetes.advanced.containerd

### Properties

| Property | Type | Required |
|:--------------------------------------------------------------------|:--------|:---------|
| [registryConfigs](#speckubernetesadvancedcontainerdregistryconfigs) | `array` | Optional |

+### Description
+
+Advanced configuration for containerd.
+
## .spec.kubernetes.advanced.containerd.registryConfigs

### Properties

@@ -4303,18 +4701,38 @@ The cloud provider to use

### Description

Allows specifying custom configuration for a registry at containerd level. You can set authentication details and mirrors for a registry.

-This feature can be used for example to authenticate to a private registry at containerd (container runtime) level, i.e. globally instead of using `imagePullSecrets`. It also can be used to use a mirror for a registry or to enable insecure connections to trusted registries that don't support TLS.
+This feature can be used for example to authenticate to a private registry at containerd (container runtime) level, i.e. globally instead of using `imagePullSecrets`. It also can be used to use a mirror for a registry or to enable insecure connections to trusted registries that have self-signed certificates.

## .spec.kubernetes.advanced.containerd.registryConfigs.insecureSkipVerify

+### Description
+
+Set to `true` to skip TLS verification (e.g. when using self-signed certificates).
+
## .spec.kubernetes.advanced.containerd.registryConfigs.mirrorEndpoint

+### Description
+
+Array of URLs with the mirrors to use for the registry. Example: `["http://mymirror.tld:8080"]`
+
## .spec.kubernetes.advanced.containerd.registryConfigs.password

+### Description
+
+The password containerd will use to authenticate against the registry.
+
## .spec.kubernetes.advanced.containerd.registryConfigs.registry

+### Description
+
+Registry address on which you would like to configure authentication or mirror(s). Example: `myregistry.tld:5000`
+
## .spec.kubernetes.advanced.containerd.registryConfigs.username

+### Description
+
+The username containerd will use to authenticate against the registry.
+
## .spec.kubernetes.advanced.encryption

### Properties

@@ -4328,13 +4746,40 @@ This feature can be used for example to authenticate to a private registry at co

### Description

-The configuration to use
+etcd's encryption at rest configuration. Must be a string with the EncryptionConfiguration object in YAML.
Example:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1
+kind: EncryptionConfiguration
+resources:
+  - resources:
+      - secrets
+    providers:
+      - aescbc:
+          keys:
+            - name: mykey
+              secret: base64_encoded_secret
+```
+
## .spec.kubernetes.advanced.encryption.tlsCipherSuites

### Description

-The tls cipher suites to use
+The TLS cipher suites to use for etcd, kubelet, and kubeadm static pods. Example:
+```yaml
+tlsCipherSuites:
+  - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
+  - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
+  - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+  - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
+  - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"
+  - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
+  - "TLS_AES_128_GCM_SHA256"
+  - "TLS_AES_256_GCM_SHA384"
+  - "TLS_CHACHA20_POLY1305_SHA256"
+```

## .spec.kubernetes.advanced.oidc

### Properties

@@ -4350,32 +4795,52 @@ The tls cipher suites to use

| [username_claim](#speckubernetesadvancedoidcusername_claim) | `string` | Optional |
| [username_prefix](#speckubernetesadvancedoidcusername_prefix) | `string` | Optional |

+### Description
+
+OIDC configuration for the Kubernetes API server.
+
## .spec.kubernetes.advanced.oidc.ca_file

### Description

-The ca file of the oidc provider
+The path to the certificate for the CA that signed the identity provider's web certificate. Defaults to the host's root CAs. This should be a path available to the API Server.

## .spec.kubernetes.advanced.oidc.client_id

### Description

-The client id of the oidc provider
+The client ID the API server will use to authenticate to the OIDC provider.

## .spec.kubernetes.advanced.oidc.group_prefix

+### Description
+
+Prefix prepended to group claims to prevent clashes with existing names (such as `system:` groups).
+
## .spec.kubernetes.advanced.oidc.groups_claim

+### Description
+
+JWT claim to use as the user's group.
+
## .spec.kubernetes.advanced.oidc.issuer_url

### Description

-The issuer url of the oidc provider
+The issuer URL of the OIDC provider.

## .spec.kubernetes.advanced.oidc.username_claim

+### Description
+
+JWT claim to use as the user name. The default value is `sub`, which is expected to be a unique identifier of the end user.
+
## .spec.kubernetes.advanced.oidc.username_prefix

+### Description
+
+Prefix prepended to username claims to prevent clashes with existing names (such as `system:` users).
+
## .spec.kubernetes.advanced.registry

### Description

@@ -4395,13 +4860,13 @@ URL of the registry where to pull images from for the Kubernetes phase. (Default

### Description

-The names of the users
+List of user names to create and get a kubeconfig file. Users will not have any permissions by default; RBAC needs to be set up for the new users.

## .spec.kubernetes.advanced.users.org

### Description

-The org of the users
+The organization the users belong to.

## .spec.kubernetes.advancedAnsible

@@ -4416,25 +4881,25 @@ The org of the users

### Description

-Additional config to append to the ansible.cfg file
+Additional configuration to append to the `ansible.cfg` file.

## .spec.kubernetes.advancedAnsible.pythonInterpreter

### Description

-The python interpreter to use
+The Python interpreter to use for running Ansible. Example: `python3`

## .spec.kubernetes.controlPlaneAddress

### Description

-The address of the control plane
+The address for the Kubernetes control plane. Usually a DNS entry pointing to a Load Balancer on port 6443.

## .spec.kubernetes.dnsZone

### Description

-The DNS zone to use for the cluster
+The DNS zone of the machines.
It will be appended to the name of each host to generate the `kubernetes_hostname` in the Ansible inventory file. It is also used to calculate etcd's initial cluster value.

## .spec.kubernetes.loadBalancers

@@ -4452,13 +4917,13 @@ The DNS zone to use for the cluster

### Description

-The additional config to use
+Additional configuration to append to HAProxy's configuration file.

## .spec.kubernetes.loadBalancers.enabled

### Description

-If true, the load balancers will be enabled
+Set to `true` to install HAProxy and configure it as a load balancer on the load balancer hosts.

## .spec.kubernetes.loadBalancers.hosts

@@ -4473,13 +4938,13 @@ If true, the load balancers will be enabled

### Description

-The IP of the host
+The IP address of the host.

## .spec.kubernetes.loadBalancers.hosts.name

### Description

-The name of the host
+A name to identify the host. This value will be concatenated to `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as `<name>.<dnsZone>`.

## .spec.kubernetes.loadBalancers.keepalived

@@ -4497,31 +4962,31 @@ The name of the host

### Description

-If true, keepalived will be enabled
+Set to `true` to install keepalived with a floating virtual IP shared between the load balancer hosts for a deployment in High Availability.

## .spec.kubernetes.loadBalancers.keepalived.interface

### Description

-The interface to use
+Name of the network interface where to bind the Keepalived virtual IP.

## .spec.kubernetes.loadBalancers.keepalived.ip

### Description

-The IP to use
+The virtual floating IP for Keepalived.

## .spec.kubernetes.loadBalancers.keepalived.passphrase

### Description

-The passphrase to use
+The passphrase for the Keepalived clustering.

## .spec.kubernetes.loadBalancers.keepalived.virtualRouterId

### Description

-The virtual router ID to use
+The virtual router ID of Keepalived; it must be different from the IDs of other Keepalived instances in the same network.

## .spec.kubernetes.loadBalancers.stats

@@ -4532,17 +4997,21 @@ The virtual router ID to use

| [password](#speckubernetesloadbalancersstatspassword) | `string` | Required |
| [username](#speckubernetesloadbalancersstatsusername) | `string` | Required |

+### Description
+
+Configuration for HAProxy's stats page, accessible at `http://<load-balancer-address>:1936/stats`.
+
## .spec.kubernetes.loadBalancers.stats.password

### Description

-The password to use
+The basic-auth password for HAProxy's stats page.

## .spec.kubernetes.loadBalancers.stats.username

### Description

-The username to use
+The basic-auth username for HAProxy's stats page.

## .spec.kubernetes.masters

### Properties

| Property | Type | Required |
|:-------------------------------------|:--------|:---------|
| [hosts](#speckubernetesmastershosts) | `array` | Required |

+### Description
+
+Configuration for the control plane hosts.
+
## .spec.kubernetes.masters.hosts

### Properties

@@ -4565,13 +5038,13 @@ The username to use

### Description

-The IP of the host
+The IP address of the host.

## .spec.kubernetes.masters.hosts.name

### Description

-The name of the host
+A name to identify the host. This value will be concatenated to `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as `<name>.<dnsZone>`.
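+
+For example, assuming `dnsZone: internal.example.dev`, a hypothetical control plane definition (the name and IP are illustrative):
+
+```yaml
+masters:
+  hosts:
+    - name: master1
+      ip: 192.168.1.10
+```
+
+The host above will be addressed as `master1.internal.example.dev`.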
## .spec.kubernetes.nodes

@@ -4583,9 +5056,9 @@ The name of the host

| [name](#speckubernetesnodesname) | `string` | Required |
| [taints](#speckubernetesnodestaints) | `array` | Optional |

-### Constraints
+### Description

-**minimum number of items**: the minimum number of items for this array is: `1`
+Configuration for the node hosts.

## .spec.kubernetes.nodes.hosts

@@ -4602,10 +5075,22 @@ The name of the host

## .spec.kubernetes.nodes.hosts.ip

+### Description
+
+The IP address of the host.
+
## .spec.kubernetes.nodes.hosts.name

+### Description
+
+A name to identify the host. This value will be concatenated to `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as `<name>.<dnsZone>`.
+
## .spec.kubernetes.nodes.name

+### Description
+
+Name for the node group. It will also be used as the node role label. It should follow the [valid variable names guideline](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#valid-variable-names) from Ansible.
+
## .spec.kubernetes.nodes.taints

### Properties

@@ -4620,7 +5105,7 @@ The name of the host

### Constraints

-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:

| Value |
|:-------------------|
@@ -4636,13 +5121,13 @@ The name of the host

### Description

-The folder where the PKI will be stored
+The path to the folder where the PKI files for Kubernetes and etcd are stored.

## .spec.kubernetes.podCidr

### Description

-The CIDR to use for the pods
+The subnet CIDR to use for the Pods network.

### Constraints

@@ -4668,7 +5153,7 @@ The CIDR to use for the pods

### Description

-The HTTP proxy to use
+The HTTP proxy URL. Example: `http://test.example.dev:3128`

### Constraints

@@ -4684,7 +5169,7 @@ The HTTP proxy to use

### Description

-The HTTPS proxy to use
+The HTTPS proxy URL. Example: `https://test.example.dev:3128`

### Constraints

@@ -4700,7 +5185,8 @@ The HTTPS proxy to use

### Description

-The no proxy to use
+Comma-separated list of hosts that should not use the HTTP(S) proxy. Example:
+`localhost,127.0.0.1,172.16.0.0/17,172.16.128.0/17,10.0.0.0/8,.example.dev`

## .spec.kubernetes.ssh

### Properties

| [keyPath](#speckubernetessshkeypath) | `string` | Required |
| [username](#speckubernetessshusername) | `string` | Required |

+### Description
+
+SSH credentials to access the hosts.
+
## .spec.kubernetes.ssh.keyPath

### Description

-The path to the private key to use to connect to the nodes
+The path to the private key to use to connect to the hosts.

## .spec.kubernetes.ssh.username

### Description

-The username to use to connect to the nodes
+The username to use to connect to the hosts.

## .spec.kubernetes.svcCidr

### Description

-The CIDR to use for the services
+The subnet CIDR to use for the Services network.
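+
+For example, a hypothetical pair of non-overlapping CIDRs for the Pods and Services networks (the values below also appear in the `noProxy` example above and are illustrative):
+
+```yaml
+podCidr: 172.16.128.0/17
+svcCidr: 172.16.0.0/17
+```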
### Constraints

@@ -4761,14 +5251,15 @@ The CIDR to use for the services

### Properties

-| Property | Type | Required |
-|:-----------------------------------------------|:---------|:---------|
-| [chart](#specpluginshelmreleaseschart) | `string` | Required |
-| [name](#specpluginshelmreleasesname) | `string` | Required |
-| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
-| [set](#specpluginshelmreleasesset) | `array` | Optional |
-| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
-| [version](#specpluginshelmreleasesversion) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------|:----------|:---------|
+| [chart](#specpluginshelmreleaseschart) | `string` | Required |
+| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional |
+| [name](#specpluginshelmreleasesname) | `string` | Required |
+| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
+| [set](#specpluginshelmreleasesset) | `array` | Optional |
+| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
+| [version](#specpluginshelmreleasesversion) | `string` | Optional |

## .spec.plugins.helm.releases.chart

### Description

The chart of the release

+## .spec.plugins.helm.releases.disableValidationOnInstall
+
+### Description
+
+Disable running `helm diff` validation when installing the plugin; validation will still be run when upgrading.
+
## .spec.plugins.helm.releases.name

### Description

diff --git a/go.mod b/go.mod
index 121af67c6..8fbb402a3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,11 +1,12 @@
module github.com/sighupio/fury-distribution

-go 1.21
+go 1.23

require (
	github.com/Al-Pragliola/go-version v1.6.2
	github.com/go-playground/validator/v10 v10.15.5
-	golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+	github.com/sighupio/go-jsonschema v0.15.3
+	golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc
)

require (
@@ -15,6 +16,7 @@ require (
	github.com/leodido/go-urn v1.2.4 // indirect
	golang.org/x/crypto v0.14.0 // indirect
	golang.org/x/net v0.17.0 // indirect
-	golang.org/x/sys v0.13.0 // indirect
+	golang.org/x/sys v0.14.0 // indirect
	golang.org/x/text v0.13.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 6e7eb35e8..905210baa 100644
--- a/go.sum
+++ b/go.sum
@@ -17,6 +17,8 @@ github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sighupio/go-jsonschema v0.15.3 h1:q2EtYBbXFRQbRbc9/lkFyg2lmxrJFaa8737dvwm/0bo=
+github.com/sighupio/go-jsonschema v0.15.3/go.mod h1:QOHAu5BGlMReCwWJx1Yf7FK+Z5D8TrVVT+SOgInHd5I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -26,14 +28,15 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod
h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/kfd.yaml b/kfd.yaml index c74a91eb3..810b0a188 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -2,24 +2,24 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. -version: v1.28.4 +version: v1.28.5 modules: - auth: v0.3.0 - aws: v4.2.1 - dr: v2.3.0 - ingress: v2.3.3 - logging: v3.4.1 - monitoring: v3.2.0 - opa: v1.12.0 - networking: v1.17.0 - tracing: v1.0.3 + auth: v0.4.0 + aws: v4.3.0 + dr: v3.0.0 + ingress: v3.0.1 + logging: v4.0.0 + monitoring: v3.3.0 + opa: v1.13.0 + networking: v2.0.0 + tracing: v1.1.0 kubernetes: eks: version: 1.28 - installer: v3.1.2 + installer: v3.2.0 onpremises: - version: 1.28.7 - installer: v1.29.3-rev.2 + version: 1.28.15 + installer: v1.30.6 furyctlSchemas: eks: - apiVersion: kfd.sighup.io/v1alpha2 @@ -35,7 +35,7 @@ tools: furyagent: version: 0.4.0 kubectl: - version: 1.28.7 + version: 1.28.15 kustomize: version: 3.10.0 terraform: diff --git a/kustomization.yaml b/kustomization.yaml deleted file mode 100644 index efe55acd5..000000000 --- a/kustomization.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# NB: This is a starting point for a kustomization.yaml file. It is not meant to be used in production as is. 
- -resources: - # Networking - - ./vendor/katalog/networking/calico - # OPA - - ./vendor/katalog/opa/gatekeeper/core - - ./vendor/katalog/opa/gatekeeper/rules/templates - - ./vendor/katalog/opa/gatekeeper/rules/config - - ./vendor/katalog/opa/gatekeeper/gpm - # Monitoring - - ./vendor/katalog/monitoring/prometheus-operator - - ./vendor/katalog/monitoring/prometheus-operated - - ./vendor/katalog/monitoring/grafana - - ./vendor/katalog/monitoring/kubeadm-sm - - ./vendor/katalog/monitoring/kube-proxy-metrics - - ./vendor/katalog/monitoring/kube-state-metrics - - ./vendor/katalog/monitoring/node-exporter - - ./vendor/katalog/monitoring/prometheus-adapter - - ./vendor/katalog/monitoring/alertmanager-operated - # Logging - - ./vendor/katalog/logging/opensearch-single - - ./vendor/katalog/logging/opensearch-dashboards - - ./vendor/katalog/logging/logging-operator - - ./vendor/katalog/logging/logging-operated - - ./vendor/katalog/logging/minio-ha - - ./vendor/katalog/logging/loki-distributed - - ./vendor/katalog/logging/configs - # Ingress - - ./vendor/katalog/ingress/cert-manager - - ./vendor/katalog/ingress/nginx - - ./vendor/katalog/ingress/forecastle - # DR - - ./vendor/katalog/dr/velero/velero-on-prem - - ./vendor/katalog/dr/velero/velero-schedules - - ./vendor/katalog/dr/velero/velero-node-agent diff --git a/pkg/apis/config/model.go b/pkg/apis/config/model.go index 52a55f81c..d48d1e1b8 100644 --- a/pkg/apis/config/model.go +++ b/pkg/apis/config/model.go @@ -69,6 +69,7 @@ type KFDToolsCommon struct { Kustomize KFDTool `yaml:"kustomize" validate:"required"` Terraform KFDTool `yaml:"terraform" validate:"required"` Yq KFDTool `yaml:"yq" validate:"required"` + Kapp KFDTool `yaml:"kapp"` Helm KFDTool `yaml:"helm"` Helmfile KFDTool `yaml:"helmfile"` } diff --git a/pkg/apis/config/validation_test.go b/pkg/apis/config/validation_test.go index 8189ff0ab..bac977fef 100644 --- a/pkg/apis/config/validation_test.go +++ b/pkg/apis/config/validation_test.go @@ -32,8 +32,6 @@ func TestValidateAwsRegion(t *testing.T) { }, } for _, tC := range testCases { - tC := tC - t.Run(tC.desc, func(t *testing.T) { t.Parallel() diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 85a0d6d39..a96dea2a0 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -6,97 +6,255 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service -type EksclusterKfdV1Alpha2 struct { - // ApiVersion corresponds to the JSON schema field "apiVersion". - ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} - // Kind corresponds to the JSON schema field "kind". - Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil +} - // Metadata corresponds to the JSON schema field "metadata". - Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` +type TypesKubeNodeSelector map[string]string - // Spec corresponds to the JSON schema field "spec". - Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` +type SpecDistributionCommonProvider struct { + // The provider type. Don't set. FOR INTERNAL USE ONLY. + Type string `json:"type" yaml:"type" mapstructure:"type"` } -type EksclusterKfdV1Alpha2Kind string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + } + type Plain SpecDistributionCommonProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCommonProvider(plain) + return nil +} -const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" +type TypesKubeTolerationEffect string -type Metadata struct { - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } -type Spec struct { - // Distribution corresponds to the JSON schema field "distribution". - Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} - // DistributionVersion corresponds to the JSON schema field "distributionVersion". - DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` +const ( + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" +) - // Infrastructure corresponds to the JSON schema field "infrastructure". - Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"` +type TypesKubeTolerationOperator string - // Kubernetes corresponds to the JSON schema field "kubernetes". 
- Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} - // Plugins corresponds to the JSON schema field "plugins". - Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil +} - // Region corresponds to the JSON schema field "region". - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` +const ( + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" +) - // This map defines which will be the common tags that will be added to all the - // resources created on AWS. - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". - ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` -} + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` -type SpecDistribution struct { - // Common corresponds to the JSON schema field "common". - Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - // CustomPatches corresponds to the JSON schema field "customPatches". - CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} - // Modules corresponds to the JSON schema field "modules". - Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration(plain) + return nil } +// Common configuration for all the distribution modules. 
type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). // // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified - Type string `json:"type" yaml:"type" mapstructure:"type"` +type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", } -type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +const ( + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" +) + +type TypesKubeLabels map[string]string + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { + // The annotations of the configmap + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the configmap will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the configmap + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { // The behavior of the configmap @@ -121,45 +279,29 @@ type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` } -type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil +} -const ( - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { - // The annotations of the configmap - Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the configmap will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` - - // The labels of the configmap - Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` -} +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource // Each entry should follow the format of Kustomize's images patch type SpecDistributionCustomPatchesImages []map[string]interface{} -type SpecDistributionCustomPatchesPatch struct { - // Options corresponds to the JSON schema field "options". - Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` - - // The patch content - Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` - - // The path of the patch - Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` - - // Target corresponds to the JSON schema field "target". - Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` -} - type SpecDistributionCustomPatchesPatchOptions struct { // If true, the kind change will be allowed AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` @@ -191,13 +333,73 @@ type SpecDistributionCustomPatchesPatchTarget struct { Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". + Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch // Each entry should be either a relative file path or an inline content resolving // to a partial or complete resource definition type SpecDistributionCustomPatchesPatchesStrategicMerge []string -type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +const ( + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" +) + +type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { + // The annotations of the secret + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the secret will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the secret + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} type SpecDistributionCustomPatchesSecretGeneratorResource struct { // The behavior of the secret @@ -225,28 +427,26 @@ type SpecDistributionCustomPatchesSecretGeneratorResource struct { Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string - -const ( - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" - 
SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { - // The annotations of the secret - Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the secret will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` - - // The labels of the secret - Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil } +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + type SpecDistributionCustompatches struct { // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` @@ -265,57 +465,41 @@ type SpecDistributionCustompatches struct { SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` } -type SpecDistributionModules struct { - // Auth corresponds to the JSON schema field "auth". - Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` - - // Aws corresponds to the JSON schema field "aws". - Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` - - // Dr corresponds to the JSON schema field "dr". - Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` - - // Ingress corresponds to the JSON schema field "ingress". - Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` - - // Logging corresponds to the JSON schema field "logging". - Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` - - // Monitoring corresponds to the JSON schema field "monitoring". - Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` - - // Networking corresponds to the JSON schema field "networking". 
- Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"`
-
- // Policy corresponds to the JSON schema field "policy".
- Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"`
+type SpecDistributionModulesAuthDexExpiry struct {
+	// Dex ID tokens expiration time duration (default 24h).
+	IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"`

- // Tracing corresponds to the JSON schema field "tracing".
- Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
+	// Dex signing key expiration time duration (default 6h).
+	SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
}

-type SpecDistributionModulesAuth struct {
- // The base domain for the auth module
- BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`
-
- // Dex corresponds to the JSON schema field "dex".
- Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Pomerium corresponds to the JSON schema field "pomerium".
- Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"`
+type TypesFuryModuleComponentOverrides struct {
+	// Set to override the node selector used to place the pods of the package.
+	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

- // Provider corresponds to the JSON schema field "provider".
- Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
+	// Set to override the tolerations that will be added to the pods of the package.
+	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}

+// Configuration for the Dex package.
type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static clients definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. Example:
+	//
+	// ```yaml
+	// additionalStaticClients:
+	//   - id: my-custom-client
+	//     name: "A custom additional static client"
+	//     redirectURIs:
+	//       - "https://myapp.tld/redirect"
+	//       - "https://alias.tld/oidc-callback"
+	//     secret: supersecretpassword
+	// ```
+	// Reference: https://dexidp.io/docs/connectors/local/
	AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`

-	// The connectors for dex
+	// A list with each item defining a Dex connector. Follows Dex connectors
+	// configuration format: https://dexidp.io/docs/connectors/
	Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`

	// Expiry corresponds to the JSON schema field "expiry".
@@ -325,194 +509,222 @@ type SpecDistributionModulesAuthDex struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesAuthDexExpiry struct { - // Dex ID tokens expiration time duration (default 24h). - IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` - - // Dex signing key expiration time duration (default 6h). - SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` -} - -type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the auth module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the auth module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} + +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress -type SpecDistributionModulesAuthPomerium interface{} +// Override the common configuration with a particular configuration for the Auth +// module. +type SpecDistributionModulesAuthOverrides struct { + // Override the definition of the Auth module ingresses. 
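Each entry of the ingresses override map shown above requires both `host` and `ingressClass`, enforced by the `UnmarshalJSON` just added. A hedged configuration sketch, where the `pomerium` key is only an example of a component name:

```yaml
# Sketch of spec.distribution.modules.auth.overrides.ingresses. Each key
# names a component's ingress; host and ingressClass are both required.
overrides:
  ingresses:
    pomerium:
      host: pomerium.internal.example.tld
      ingressClass: internal
```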
+ Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` -// override default routes for KFD components -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { - // GatekeeperPolicyManager corresponds to the JSON schema field - // "gatekeeperPolicyManager". - GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` + // Set to override the node selector used to place the pods of the Auth module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // HubbleUi corresponds to the JSON schema field "hubbleUi". - HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + // Set to override the tolerations that will be added to the pods of the Auth + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} - // IngressNgnixForecastle corresponds to the JSON schema field - // "ingressNgnixForecastle". - IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` +type SpecDistributionModulesAuthPomerium interface{} - // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". - LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` +// Configuration for the HTTP Basic Auth provider. +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for logging in with the HTTP basic authentication. + Password string `json:"password" yaml:"password" mapstructure:"password"` - // LoggingOpensearchDashboards corresponds to the JSON schema field - // "loggingOpensearchDashboards". - LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` - - // MonitoringAlertmanager corresponds to the JSON schema field - // "monitoringAlertmanager". - MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` - - // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". - MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` - - // MonitoringMinioConsole corresponds to the JSON schema field - // "monitoringMinioConsole". 
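With the HTTP Basic Auth provider defined above (both fields required, see the validation that follows), the corresponding furyctl configuration fragment could look like this sketch, with placeholder values:

```yaml
# Sketch of spec.distribution.modules.auth using the basicAuth provider.
# username and password are both required by the generated validation.
auth:
  provider:
    type: basicAuth
    basicAuth:
      username: admin
      password: <a-strong-password>
```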
- MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` - - // MonitoringPrometheus corresponds to the JSON schema field - // "monitoringPrometheus". - MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` - - // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". - TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` + // The username for logging in with the HTTP basic authentication. + Username string `json:"username" yaml:"username" mapstructure:"username"` } -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil +} -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} +type SpecDistributionModulesAuthProviderType string -type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} -// Pomerium needs some user-provided secrets to be fully configured. These secrets -// should be unique between clusters. -type SpecDistributionModulesAuthPomeriumSecrets struct { - // Cookie Secret is the secret used to encrypt and sign session cookies. 
- // - // To generate a random key, run the following command: `head -c32 /dev/urandom | - // base64` - COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil +} - // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth - // type is SSO, this value will be the secret used to authenticate Pomerium with - // Dex, **use a strong random value**. - IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` +const ( + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) - // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate - // requests between Pomerium services. It's critical that secret keys are random, - // and stored safely. - // - // To generate a key, run the following command: `head -c32 /dev/urandom | base64` - SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` +type SpecDistributionModulesAuthProvider struct { + // BasicAuth corresponds to the JSON schema field "basicAuth". + BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // Signing Key is the base64 representation of one or more PEM-encoded private - // keys used to sign a user's attestation JWT, which can be consumed by upstream - // applications to pass along identifying user information like username, id, and - // groups. - // - // To generates an P-256 (ES256) signing key: + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. // - // ```bash - // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem - // # careful! this will output your private key in terminal - // cat ec_private.pem | base64 - // ``` - SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` + // Default is `none`. + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } -// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. -type SpecDistributionModulesAuthPomerium_2 struct { - // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". 
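Enum-backed strings such as `SpecDistributionModulesAuthProviderType` get a similar decode-time guard: the value is compared against `enumValues_SpecDistributionModulesAuthProviderType` and anything else is rejected. A small sketch of the behavior (same assumed import path as the earlier example; the error text matches the `fmt.Errorf` above):

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path, as in the earlier sketch (illustrative only).
	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	var t public.SpecDistributionModulesAuthProviderType

	// "sso" is one of the allowed values, so this succeeds.
	fmt.Println(json.Unmarshal([]byte(`"sso"`), &t)) // <nil>

	// "oidc" is not in the enum and is rejected with a descriptive error:
	// invalid value (expected one of []interface {}{"none", "basicAuth", "sso"}): "oidc"
	fmt.Println(json.Unmarshal([]byte(`"oidc"`), &t))
}
```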
- DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil +} - // DEPRECATED: Use defaultRoutesPolicy and/or routes - Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` +// Configuration for the Auth module. +type SpecDistributionModulesAuth struct { + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` - // Additional routes configuration for Pomerium. Follows Pomerium's route format: - // https://www.pomerium.com/docs/reference/routes - Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` - // Secrets corresponds to the JSON schema field "secrets". - Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` -} + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -type SpecDistributionModulesAuthProvider struct { - // BasicAuth corresponds to the JSON schema field "basicAuth". - BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` + // Pomerium corresponds to the JSON schema field "pomerium". + Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** - Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` } -type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth - Password string `json:"password" yaml:"password" mapstructure:"password"` - - // The username for the basic auth - Username string `json:"username" yaml:"username" mapstructure:"username"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if v, ok := raw["provider"]; !ok || v == nil {
+		return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
+	}
+	type Plain SpecDistributionModulesAuth
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SpecDistributionModulesAuth(plain)
+	return nil
 }
 
-type SpecDistributionModulesAuthProviderType string
-
-const (
-	SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth"
-	SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none"
-	SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso"
-)
-
-type SpecDistributionModulesAws struct {
-	// ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler".
-	ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"`
+type TypesAwsArn string
 
-	// EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver".
-	EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"`
+type TypesAwsIamRoleName string
 
-	// EbsSnapshotController corresponds to the JSON schema field
-	// "ebsSnapshotController".
-	EbsSnapshotController *SpecDistributionModulesAwsEbsSnapshotController `json:"ebsSnapshotController,omitempty" yaml:"ebsSnapshotController,omitempty" mapstructure:"ebsSnapshotController,omitempty"`
+type TypesFuryModuleComponentOverridesWithIAMRoleName struct {
+	// IamRoleName corresponds to the JSON schema field "iamRoleName".
+	IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"`
 
-	// LoadBalancerController corresponds to the JSON schema field
-	// "loadBalancerController".
-	LoadBalancerController SpecDistributionModulesAwsLoadBalancerController `json:"loadBalancerController" yaml:"loadBalancerController" mapstructure:"loadBalancerController"`
+	// Set to override the node selector used to place the pods of the
+	// package.
+	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
 
-	// Overrides corresponds to the JSON schema field "overrides".
-	Overrides TypesFuryModuleOverrides `json:"overrides" yaml:"overrides" mapstructure:"overrides"`
+	// Set to override the tolerations that will be added to the pods of the
+	// package.
+	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }
 
 type SpecDistributionModulesAwsClusterAutoscaler struct {
@@ -523,6 +735,24 @@ type SpecDistributionModulesAwsClusterAutoscaler struct {
 	Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }
 
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + } + type Plain SpecDistributionModulesAwsClusterAutoscaler + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsClusterAutoscaler(plain) + return nil +} + type SpecDistributionModulesAwsEbsCsiDriver struct { // IamRoleArn corresponds to the JSON schema field "iamRoleArn". IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` @@ -531,6 +761,24 @@ type SpecDistributionModulesAwsEbsCsiDriver struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") + } + type Plain SpecDistributionModulesAwsEbsCsiDriver + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + return nil +} + type SpecDistributionModulesAwsEbsSnapshotController struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -544,2610 +792,1911 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesDr struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of the DR, must be ***none*** or ***eks*** - Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` - - // Velero corresponds to the JSON schema field "velero". - Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` -} - -type SpecDistributionModulesDrType string - -const ( - SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" - SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" -) - -type SpecDistributionModulesDrVelero struct { - // Eks corresponds to the JSON schema field "eks". - Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket - BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - - // IamRoleArn corresponds to the JSON schema field "iamRoleArn". 
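The two validations above make `iamRoleArn` a hard requirement for the cluster autoscaler and the EBS CSI driver components. As a hedged illustration of the fields involved (the account ID and role names are placeholders):

```yaml
# Sketch of spec.distribution.modules.aws with the required IAM role ARNs.
aws:
  clusterAutoscaler:
    iamRoleArn: arn:aws:iam::123456789012:role/example-cluster-autoscaler
  ebsCsiDriver:
    iamRoleArn: arn:aws:iam::123456789012:role/example-ebs-csi-driver
```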
- IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - - // The region where the velero bucket is located - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` -} - -type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone - BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - - // CertManager corresponds to the JSON schema field "certManager". - CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"` - - // Dns corresponds to the JSON schema field "dns". - Dns SpecDistributionModulesIngressDNS `json:"dns" yaml:"dns" mapstructure:"dns"` - - // ExternalDns corresponds to the JSON schema field "externalDns". - ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"` - - // Forecastle corresponds to the JSON schema field "forecastle". - Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - - // Configurations for the nginx ingress controller module - Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesIngressCertManager struct { - // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". - ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer - Email string `json:"email" yaml:"email" mapstructure:"email"` - - // The name of the cluster issuer - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Route53 corresponds to the JSON schema field "route53". - Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` - - // The custom solvers configurations - Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - - // The type of the cluster issuer, must be ***dns01*** or ***http01*** - Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` -} - -type SpecDistributionModulesIngressCertManagerClusterIssuerType string - -const ( - SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01" - SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" -) - -type SpecDistributionModulesIngressClusterIssuerRoute53 struct { - // HostedZoneId corresponds to the JSON schema field "hostedZoneId". 
- HostedZoneId string `json:"hostedZoneId" yaml:"hostedZoneId" mapstructure:"hostedZoneId"` - - // IamRoleArn corresponds to the JSON schema field "iamRoleArn". - IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - - // Region corresponds to the JSON schema field "region". - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` -} - -type SpecDistributionModulesIngressDNS struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Private corresponds to the JSON schema field "private". - Private SpecDistributionModulesIngressDNSPrivate `json:"private" yaml:"private" mapstructure:"private"` - - // Public corresponds to the JSON schema field "public". - Public SpecDistributionModulesIngressDNSPublic `json:"public" yaml:"public" mapstructure:"public"` -} - -type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the private hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // VpcId corresponds to the JSON schema field "vpcId". - VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"` -} - -type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the public hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type SpecDistributionModulesIngressExternalDNS struct { - // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn". - PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"` - - // PublicIamRoleArn corresponds to the JSON schema field "publicIamRoleArn". - PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"` -} - -type SpecDistributionModulesIngressForecastle struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesIngressNginx struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tls corresponds to the JSON schema field "tls". - Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** - Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesIngressNginxTLS struct { - // The provider of the TLS certificate, must be ***none***, ***certManager*** or - // ***secret*** - Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` - - // Secret corresponds to the JSON schema field "secret". 
- Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` -} - -type SpecDistributionModulesIngressNginxTLSProvider string - -const ( - SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" - SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" - SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" -) - -type SpecDistributionModulesIngressNginxTLSSecret struct { - // Ca corresponds to the JSON schema field "ca". - Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - - // The certificate file content or you can use the file notation to get the - // content from a file - Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` -} - -type SpecDistributionModulesIngressNginxType string - -const ( - SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" - SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" - SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" -) - -type SpecDistributionModulesIngressOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the ingress module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the ingress module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type SpecDistributionModulesIngressOverridesIngresses struct { - // Forecastle corresponds to the JSON schema field "forecastle". - Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` -} - -type SpecDistributionModulesLogging struct { - // Cerebro corresponds to the JSON schema field "cerebro". - Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` - - // CustomOutputs corresponds to the JSON schema field "customOutputs". - CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` - - // Loki corresponds to the JSON schema field "loki". - Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Opensearch corresponds to the JSON schema field "opensearch". - Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` - - // Operator corresponds to the JSON schema field "operator". 
- Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // selects the logging stack. Choosing none will disable the centralized logging. - // Choosing opensearch will deploy and configure the Logging Operator and an - // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh - // for storage. Choosing customOuput the Logging Operator will be deployed and - // installed but with no local storage, you will have to create the needed Outputs - // and ClusterOutputs to ship the logs to your desired storage. - Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesLoggingCerebro struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. -type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Events string `json:"events" yaml:"events" mapstructure:"events"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
- Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` -} - -type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". - Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -} - -type SpecDistributionModulesLoggingLokiBackend string - -const ( - SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" - SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" -) - -type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the loki external endpoint - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the loki external endpoint - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the loki external endpoint will be insecure - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the loki external endpoint - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesLoggingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". 
- RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The PVC size for each minio disk, 6 disks total - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username of the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesLoggingOpensearch struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The storage size for the opensearch pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - - // The type of the opensearch, must be ***single*** or ***triple*** - Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesLoggingOpensearchType string - -const ( - SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" - SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" -) - -type SpecDistributionModulesLoggingOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesLoggingType string - -const ( - SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" - SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" - SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" -) - -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". - BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` - - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` - - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". 
- KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` - - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. - // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instance, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` - - // X509Exporter corresponds to the JSON schema field "x509Exporter". - X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` -} - -type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io - DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - - // If true, the default rules will be installed - InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - - // The slack webhook url to send alerts - SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` -} - -type SpecDistributionModulesMonitoringBlackboxExporter struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesMonitoringGrafana struct { - // Setting this to true will deploy an additional `grafana-basic-auth` ingress - // protected with Grafana's basic auth instead of SSO. It's intended use is as a - // temporary ingress for when there are problems with the SSO login flow. - // - // Notice that by default anonymous access is enabled. - BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's - // role. Example: - // - // ```yaml - // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || - // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && - // 'Viewer' - // ``` - // - // More details in [Grafana's - // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). - UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` -} - -type SpecDistributionModulesMonitoringKubeStateMetrics struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesMonitoringMimirBackend string - -const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" - SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" -) - -type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external mimir backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external mimir backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external mimir backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external mimir backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesMonitoringMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the k8s Prometheus instance. 
- RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the k8s Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the k8s Prometheus instance. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworking struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // Type corresponds to the JSON schema field "type". - Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` -} - -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". 
- Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` -} - -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external tempo backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external tempo backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external tempo backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external tempo backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -type SpecInfrastructure struct { - // This key defines the VPC that will be created in AWS - Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` - - // This section defines the creation of VPN bastions - Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` -} - -type SpecInfrastructureVpc struct { - // Network corresponds to the JSON schema field "network". - Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` -} - -type SpecInfrastructureVpcNetwork struct { - // This is the CIDR of the VPC that will be created - Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` - - // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". 
- SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` -} - -type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // These are the CIRDs for the private subnets, where the nodes, the pods, and the - // private load balancers will be created - Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - - // These are the CIDRs for the public subnets, where the public load balancers and - // the VPN servers will be created - Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` -} - -type SpecInfrastructureVpn struct { - // This value defines the prefix that will be used to create the bucket name where - // the VPN servers will store the states - BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` - - // The dhParamsBits size used for the creation of the .pem file that will be used - // in the dh openvpn server.conf file - DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"` - - // The size of the disk in GB - DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` - - // Overrides the default IAM user name for the VPN - IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"` - - // The size of the AWS EC2 instance - InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"` - - // The number of instances to create, 0 to skip the creation - Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"` - - // The username of the account to create in the bastion's operating system - OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` - - // The port used by the OpenVPN server - Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // Ssh corresponds to the JSON schema field "ssh". 
- Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - - // The VPC ID where the VPN servers will be created, required only if - // .spec.infrastructure.vpc is omitted - VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - - // The CIDR that will be used to assign IP addresses to the VPN clients when - // connected - VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` -} - -type SpecInfrastructureVpnSsh struct { - // The CIDR enabled in the security group that can access the bastions in SSH - AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` - - // The github user name list that will be used to get the ssh public key that will - // be added as authorized key to the operatorName user - GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` - - // This value defines the public keys that will be added to the bastion's - // operating system NOTES: Not yet implemented - PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` -} - -type SpecKubernetes struct { - // ApiServer corresponds to the JSON schema field "apiServer". - ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` - - // AwsAuth corresponds to the JSON schema field "awsAuth". - AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` - - // Overrides the default IAM role name prefix for the EKS cluster - ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` - - // Optional Kubernetes Cluster log retention in days. Defaults to 90 days. - LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` - - // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. - LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` - - // This key contains the ssh public key that can connect to the nodes via SSH - // using the ec2-user user - NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` - - // NodePools corresponds to the JSON schema field "nodePools". - NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` - - // Either `launch_configurations`, `launch_templates` or `both`. For new clusters - // use `launch_templates`, for existing cluster you'll need to migrate from - // `launch_configurations` to `launch_templates` using `both` as interim. 
- NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` - - // This value defines the CIDR that will be used to assign IP addresses to the - // services - ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` - - // This value defines the subnet IDs where the EKS cluster will be created, - // required only if .spec.infrastructure.vpc is omitted - SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - - // This value defines the VPC ID where the EKS cluster will be created, required - // only if .spec.infrastructure.vpc is omitted - VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - - // Overrides the default IAM role name prefix for the EKS workers - WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` -} - -type SpecKubernetesAPIServer struct { - // This value defines if the API server will be accessible only from the private - // subnets - PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` - - // This value defines the CIDRs that will be allowed to access the API server from - // the private subnets - PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` - - // This value defines if the API server will be accessible from the public subnets - PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` - - // This value defines the CIDRs that will be allowed to access the API server from - // the public subnets - PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` -} - -type SpecKubernetesAwsAuth struct { - // This optional array defines additional AWS accounts that will be added to the - // aws-auth configmap - AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` - - // This optional array defines additional IAM roles that will be added to the - // aws-auth configmap - Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` - - // This optional array defines additional IAM users that will be added to the - // aws-auth configmap - Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` -} - -type SpecKubernetesAwsAuthRole struct { - // Groups corresponds to the JSON schema field "groups". - Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` - - // Rolearn corresponds to the JSON schema field "rolearn". - Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"` - - // Username corresponds to the JSON schema field "username". - Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecKubernetesAwsAuthUser struct { - // Groups corresponds to the JSON schema field "groups". - Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` - - // Userarn corresponds to the JSON schema field "userarn". 
- Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"` - - // Username corresponds to the JSON schema field "username". - Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecKubernetesLogsTypesElem string - -const ( - SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api" - SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit" - SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator" - SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager" - SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" -) - -type SpecKubernetesNodePool struct { - // AdditionalFirewallRules corresponds to the JSON schema field - // "additionalFirewallRules". - AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"` - - // Ami corresponds to the JSON schema field "ami". - Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` - - // This optional array defines additional target groups to attach to the instances - // in the node pool - AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - - // The container runtime to use for the nodes - ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` - - // Instance corresponds to the JSON schema field "instance". - Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - - // Kubernetes labels that will be added to the nodes - Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` - - // The name of the node pool - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Size corresponds to the JSON schema field "size". - Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - - // This value defines the subnet IDs where the nodes will be created - SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - - // AWS tags that will be added to the ASG and EC2 instances - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // Kubernetes taints that will be added to the nodes - Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` - - // Type corresponds to the JSON schema field "type". - Type *SpecKubernetesNodePoolType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { - // CidrBlocks corresponds to the JSON schema field "cidrBlocks". - CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` - - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // Protocol corresponds to the JSON schema field "protocol". 
- Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // Tags corresponds to the JSON schema field "tags". - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // Type corresponds to the JSON schema field "type". - Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { - // From corresponds to the JSON schema field "from". - From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` - - // To corresponds to the JSON schema field "to". - To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // The protocol of the FW rule - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // If true, the source will be the security group itself - Self bool `json:"self" yaml:"self" mapstructure:"self"` - - // The tags of the FW rule - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // The type of the FW rule can be ingress or egress - Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". 
- Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // The protocol of the FW rule - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // The source security group ID - SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - - // The tags of the FW rule - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // The type of the FW rule can be ingress or egress - Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. - CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` - - // Self corresponds to the JSON schema field "self". - Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` - - // SourceSecurityGroupId corresponds to the JSON schema field - // "sourceSecurityGroupId". - SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` -} - -type SpecKubernetesNodePoolAmi struct { - // The AMI ID to use for the nodes - Id string `json:"id" yaml:"id" mapstructure:"id"` - - // The owner of the AMI - Owner string `json:"owner" yaml:"owner" mapstructure:"owner"` -} - -type SpecKubernetesNodePoolContainerRuntime string - -const ( - SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" - SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" -) - -type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". - MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - - // If true, the nodes will be created as spot instances - Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - - // The instance type to use for the nodes - Type string `json:"type" yaml:"type" mapstructure:"type"` - - // The size of the disk in GB - VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - - // VolumeType corresponds to the JSON schema field "volumeType". 
- VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` -} - -type SpecKubernetesNodePoolInstanceVolumeType string - -const ( - SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" - SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" - SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" - SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" -) - -type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool - Max int `json:"max" yaml:"max" mapstructure:"max"` - - // The minimum number of nodes in the node pool - Min int `json:"min" yaml:"min" mapstructure:"min"` -} - -type SpecKubernetesNodePoolType string - -const ( - SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" - SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" -) - -type SpecKubernetesNodePoolsLaunchKind string - -const ( - SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" - SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" - SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" -) - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} - -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". 
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type SpecToolsConfiguration struct { - // Terraform corresponds to the JSON schema field "terraform". - Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` -} - -type SpecToolsConfigurationTerraform struct { - // State corresponds to the JSON schema field "state". - State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` -} - -type SpecToolsConfigurationTerraformState struct { - // S3 corresponds to the JSON schema field "s3". - S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` -} - -type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states - BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - - // This value defines which folder will be used to store all the states inside the - // bucket - KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - - // This value defines in which region the bucket is located - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` - - // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region - SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` -} - -type TypesAwsArn string - -type TypesAwsIamRoleName string - -type TypesAwsIamRoleNamePrefix string - -type TypesAwsIpProtocol string - -type TypesAwsRegion string - -const ( - TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" - TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" - TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" - TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" - TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" - TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" - TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" - TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" - TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" - TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" - TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" - TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" - TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" - 
TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" - TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" - TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" - TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" - TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" - TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" -) - -type TypesAwsS3BucketName string - -type TypesAwsS3BucketNamePrefix string - -type TypesAwsS3KeyPrefix string - -type TypesAwsSshPubKey string - -type TypesAwsSubnetId string - -type TypesAwsTags map[string]string - -type TypesAwsVpcId string - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". 
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesIpAddress string - -type TypesKubeLabels map[string]string - -type TypesKubeLabels_1 map[string]string - -type TypesKubeNodeSelector map[string]string - -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeTaints []string - -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -type TypesKubeTolerationEffect string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" -) - -type TypesKubeTolerationEffect_1 string - -const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" -) - -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - -type TypesKubeTolerationOperator_1 string - -const ( - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesTcpPort int - -type TypesUri string - -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", -} - -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", -} - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "audit", - "enforce", -} - -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) - return nil -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecDistributionModulesAwsLoadBalancerController var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesAwsLoadBalancerController(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) - return nil +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
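The new `UnmarshalJSON` above is where the schema's required-field checks live. As a minimal sketch of the behavior for downstream users of these generated types (the import path and the `public` alias are assumptions for illustration, not part of this patch):

package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path; adjust to wherever this generated package lives.
	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	var lbc public.SpecDistributionModulesAwsLoadBalancerController
	// The generated UnmarshalJSON rejects payloads missing required fields.
	err := json.Unmarshal([]byte(`{}`), &lbc)
	fmt.Println(err)
	// Output: field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required
}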
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) - return nil +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAws struct { + // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". + ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"` + + // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver". + EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"` + + // EbsSnapshotController corresponds to the JSON schema field + // "ebsSnapshotController". + EbsSnapshotController *SpecDistributionModulesAwsEbsSnapshotController `json:"ebsSnapshotController,omitempty" yaml:"ebsSnapshotController,omitempty" mapstructure:"ebsSnapshotController,omitempty"` + + // LoadBalancerController corresponds to the JSON schema field + // "loadBalancerController". + LoadBalancerController SpecDistributionModulesAwsLoadBalancerController `json:"loadBalancerController" yaml:"loadBalancerController" mapstructure:"loadBalancerController"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides TypesFuryModuleOverrides `json:"overrides" yaml:"overrides" mapstructure:"overrides"` } // UnmarshalJSON implements json.Unmarshaler. 
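The overrides type above is the hook every module exposes for rescheduling its pods. Continuing the hypothetical program from the previous sketch, a value pinning a module to dedicated nodes might look like this (the label and taint key are invented for illustration):

func exampleOverrides() {
	infra := "infra"
	overrides := public.TypesFuryModuleOverrides{
		// Pin the module's pods to a dedicated node pool...
		NodeSelector: public.TypesKubeNodeSelector{"node.kubernetes.io/role": infra},
		// ...and tolerate the taint that keeps other workloads off of it.
		Tolerations: []public.TypesKubeToleration{{
			Effect: public.TypesKubeTolerationEffectNoSchedule,
			Key:    "node.kubernetes.io/role",
			Value:  &infra,
		}},
	}
	_ = overrides
}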
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { + return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { + return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["loadBalancerController"]; !ok || v == nil { + return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["overrides"]; !ok || v == nil { + return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecDistributionModulesAws var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecDistributionModulesAws(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") - } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthDex(plain) - return nil -} +type SpecDistributionModulesDrType string -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecDistributionModulesDrType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") - } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthOverridesIngress(plain) - return nil +const ( + SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" + SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" +) + +type TypesAwsS3BucketName string + +type TypesAwsRegion string + +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_TypesAwsRegion { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = TypesAwsRegion(v) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", +const TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" + +type Metadata struct { + // The name of the cluster. 
It will also be used as a prefix for all the other + // resources created. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +const ( + TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" + TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" + TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" +) + +type SpecDistributionModulesDrVeleroEks struct { + // The name of the bucket for Velero. + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". + IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // The region where the bucket for Velero will be located. + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } // UnmarshalJSON implements json.Unmarshaler. 
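Every enum wrapper in this hunk (`SpecDistributionModulesDrType`, `TypesAwsRegion`, and the others) shares the same decode-time allow-list check: any string outside the generated `enumValues_*` slice is rejected. A short sketch, under the same assumed import as before:

func exampleEnums() {
	var region public.TypesAwsRegion
	fmt.Println(json.Unmarshal([]byte(`"eu-west-1"`), &region)) // <nil>; the value is in the allow-list

	var drType public.SpecDistributionModulesDrType
	err := json.Unmarshal([]byte(`"gcp"`), &drType)
	fmt.Println(err) // the error lists the allowed values ("none", "eks") and the rejected "gcp"
}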
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { + // The cron expression for the `manifests` backup schedule (default `*/15 * * * + // *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). 
Notice that changing this value will affect only newly
+	// created backups; prior backups will keep the old TTL.
+	Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
+
+// Configuration for Velero schedules.
+type SpecDistributionModulesDrVeleroSchedulesDefinitions struct {
+	// Configuration for Velero's full backup schedule.
+	Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
+
+	// Configuration for Velero's manifests backup schedule.
+	Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+}
+
+// Configuration for Velero's backup schedules.
+type SpecDistributionModulesDrVeleroSchedules struct {
+	// Configuration for Velero schedules.
+	Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"`
+
+	// Whether or not to install the default `manifests` and `full` backup schedules.
+	// Default is `true`.
+	Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
+type SpecDistributionModulesDrVelero struct {
+	// Eks corresponds to the JSON schema field "eks".
+	Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Configuration for Velero's backup schedules.
+	Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["password"]; !ok || v == nil {
-		return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
-	}
-	if v, ok := raw["username"]; !ok || v == nil {
-		return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+	if v, ok := raw["eks"]; !ok || v == nil {
+		return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required")
 	}
-	type Plain SpecDistributionModulesAuthProviderBasicAuth
+	type Plain SpecDistributionModulesDrVelero
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+	*j = SpecDistributionModulesDrVelero(plain)
 	return nil
 }

-// UnmarshalJSON implements json.Unmarshaler.
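+// Editor's illustrative sketch (not part of the generated schema): building
+// the schedules section programmatically. The cron expressions and TTL mirror
+// the documented defaults above; everything else is hypothetical.
+func exampleVeleroSchedules() *SpecDistributionModulesDrVeleroSchedules {
+	fullCron := "0 1 * * *"         // documented default for the `full` schedule
+	manifestsCron := "*/15 * * * *" // documented default for the `manifests` schedule
+	ttl := "720h0m0s"               // documented default TTL, 30 days
+	install := true                 // documented default
+	return &SpecDistributionModulesDrVeleroSchedules{
+		Install: &install,
+		Definitions: &SpecDistributionModulesDrVeleroSchedulesDefinitions{
+			Full:      &SpecDistributionModulesDrVeleroSchedulesDefinitionsFull{Schedule: &fullCron, Ttl: &ttl},
+			Manifests: &SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests{Schedule: &manifestsCron, Ttl: &ttl},
+		},
+	}
+}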
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) - return nil +// Configuration for the Disaster Recovery module. +type SpecDistributionModulesDr struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. + Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` + + // Velero corresponds to the JSON schema field "velero". + Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecDistributionModulesDr(plain) return nil } +type SpecDistributionModulesIngressClusterIssuerRoute53 struct { + // HostedZoneId corresponds to the JSON schema field "hostedZoneId". + HostedZoneId string `json:"hostedZoneId" yaml:"hostedZoneId" mapstructure:"hostedZoneId"` + + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". + IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Region corresponds to the JSON schema field "region". + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` +} + // UnmarshalJSON implements json.Unmarshaler. 
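+// Editor's note (illustrative): the unmarshaller below requires all three
+// Route53 fields. A hedged sketch of the corresponding YAML fragment, with
+// hypothetical values:
+//
+//	route53:
+//	  hostedZoneId: Z0123456789ABCDEFGHIJ
+//	  iamRoleArn: arn:aws:iam::123456789012:role/example-cert-manager-route53
+//	  region: eu-west-1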
-func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["hostedZoneId"]; !ok || v == nil { + return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecDistributionModulesIngressClusterIssuerRoute53 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) + *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) return nil } +type SpecDistributionModulesIngressCertManagerClusterIssuerType string + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", +const ( + SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01" + SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" +) + +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. +type SpecDistributionModulesIngressCertManagerClusterIssuer struct { + // The email address to use during the certificate issuing process. + Email string `json:"email" yaml:"email" mapstructure:"email"` + + // The name of the clusterIssuer. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Route53 corresponds to the JSON schema field "route53". + Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` + + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. 
Check [cert-manager's
+	// documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types)
+	// for examples of this field.
+	Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`
+
+	// The type of the clusterIssuer, must be `dns01` for using DNS challenge or
+	// `http01` for using HTTP challenge.
+	Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
 }

 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	var ok bool
-	for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
+	if v, ok := raw["email"]; !ok || v == nil {
+		return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+	}
+	if v, ok := raw["name"]; !ok || v == nil {
+		return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+	}
+	if v, ok := raw["route53"]; !ok || v == nil {
+		return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+	}
+	type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
+	return nil
+}
+
+// Configuration for the cert-manager package. Required even if
+// `ingress.nginx.type` is `none`; cert-manager is used for managing other
+// certificates in the cluster besides the TLS termination certificates for the
+// ingresses.
+type SpecDistributionModulesIngressCertManager struct {
+	// ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
+	ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
 	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v)
+	if v, ok := raw["clusterIssuer"]; !ok || v == nil {
+		return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
 	}
-	*j = SpecKubernetesNodePoolContainerRuntime(v)
+	type Plain SpecDistributionModulesIngressCertManager
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SpecDistributionModulesIngressCertManager(plain)
 	return nil
 }

+// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for
+// exposing infrastructural services only in the private DNS zone.
+type SpecDistributionModulesIngressDNSPrivate struct {
+	// By default, a Terraform data source will be used to get the private DNS zone.
+	// Set to `true` to create the private zone instead.
+	Create bool `json:"create" yaml:"create" mapstructure:"create"`
+
+	// The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`.
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+	// VpcId corresponds to the JSON schema field "vpcId".
+	VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
+	if v, ok := raw["create"]; !ok || v == nil {
+		return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required")
 	}
-	type Plain SpecDistributionModulesAuthProvider
+	if v, ok := raw["name"]; !ok || v == nil {
+		return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required")
+	}
+	if v, ok := raw["vpcId"]; !ok || v == nil {
+		return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required")
+	}
+	type Plain SpecDistributionModulesIngressDNSPrivate
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAuthProvider(plain)
+	*j = SpecDistributionModulesIngressDNSPrivate(plain)
 	return nil
 }

+type SpecDistributionModulesIngressDNSPublic struct {
+	// By default, a Terraform data source will be used to get the public DNS zone.
+	// Set to `true` to create the public zone instead.
+	Create bool `json:"create" yaml:"create" mapstructure:"create"`
+
+	// The name of the public hosted zone.
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["provider"]; !ok || v == nil {
-		return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
+	if v, ok := raw["create"]; !ok || v == nil {
+		return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required")
 	}
-	type Plain SpecDistributionModulesAuth
+	if v, ok := raw["name"]; !ok || v == nil {
+		return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required")
+	}
+	type Plain SpecDistributionModulesIngressDNSPublic
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAuth(plain)
+	*j = SpecDistributionModulesIngressDNSPublic(plain)
 	return nil
 }

+// DNS definition, used in conjunction with the `externalDNS` package to automate
+// DNS management and certificate emission.
+type SpecDistributionModulesIngressDNS struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Private corresponds to the JSON schema field "private".
+	Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"`
+
+	// Public corresponds to the JSON schema field "public".
+ Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` +} + +type SpecDistributionModulesIngressExternalDNS struct { + // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn". + PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"` + + // PublicIamRoleArn corresponds to the JSON schema field "publicIamRoleArn". + PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") } - *j = SpecKubernetesLogsTypesElem(v) + type Plain SpecDistributionModulesIngressExternalDNS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressExternalDNS(plain) return nil } -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", +type SpecDistributionModulesIngressForecastle struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressNginxTLSProvider string + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } // UnmarshalJSON implements json.Unmarshaler. 
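+// Editor's note (illustrative): enum-typed strings such as the TLS provider
+// are validated on unmarshal, so values outside `certManager`, `secret`, and
+// `none` are rejected instead of being silently accepted:
+//
+//	var p SpecDistributionModulesIngressNginxTLSProvider
+//	_ = json.Unmarshal([]byte(`"secret"`), &p)         // ok
+//	err := json.Unmarshal([]byte(`"letsencrypt"`), &p) // invalid value error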
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType {
+	for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
 	}
-	*j = SpecKubernetesNodePoolInstanceVolumeType(v)
+	*j = SpecDistributionModulesIngressNginxTLSProvider(v)
 	return nil
 }

-var enumValues_SpecKubernetesLogsTypesElem = []interface{}{
-	"api",
-	"audit",
-	"authenticator",
-	"controllerManager",
-	"scheduler",
-}
+const (
+	SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager"
+	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
+	SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none"
+)

-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["iamRoleArn"]; !ok || v == nil {
-		return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required")
-	}
-	type Plain SpecDistributionModulesAwsClusterAutoscaler
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesAwsClusterAutoscaler(plain)
-	return nil
+// Kubernetes TLS secret for the ingresses' TLS certificate.
+type SpecDistributionModulesIngressNginxTLSSecret struct {
+	// The Certificate Authority certificate file's content. You can use the
+	// `"{file://<path>}"` notation to get the content from a file.
+	Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
+
+	// The certificate file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
+	Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
+
+	// The signing key file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
+	Key string `json:"key" yaml:"key" mapstructure:"key"`
 }

 // UnmarshalJSON implements json.Unmarshaler.
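+// Editor's illustrative sketch (hypothetical paths): a YAML fragment using the
+// `{file://<path>}` notation documented above to load the TLS material:
+//
+//	tls:
+//	  provider: secret
+//	  secret:
+//	    ca: "{file://./certs/ca.crt}"
+//	    cert: "{file://./certs/tls.crt}"
+//	    key: "{file://./certs/tls.key}"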
-func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") - } - type Plain SpecDistributionModulesAwsEbsCsiDriver - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAwsEbsCsiDriver(plain) - return nil +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + + // Secret corresponds to the JSON schema field "secret". + Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
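+// Editor's note (illustrative): `provider` is the only field the unmarshaller
+// below requires; `secret` is only meaningful when `provider` is `secret`.
+// A minimal sketch:
+//
+//	var tls SpecDistributionModulesIngressNginxTLS
+//	err := json.Unmarshal([]byte(`{"provider":"certManager"}`), &tls) // err == nil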
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } +type SpecDistributionModulesIngressNginxType string + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesNodePoolInstance - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecDistributionModulesIngressNginxType(v) return nil } +const ( + SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" + SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" + SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" +) + +type SpecDistributionModulesIngressNginx struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tls corresponds to the JSON schema field "tls". + Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` + + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. + // - `single`: a single ingress controller with ingress class `nginx` will be + // installed to manage all the ingress resources, infrastructural ingresses will + // be created. + // - `dual`: two independent ingress controllers will be installed, one for the + // `internal` ingress class intended for private ingresses and one for the + // `external` ingress class intended for public ingresses. 
KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
+	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["iamRoleArn"]; !ok || v == nil {
-		return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
 	}
-	type Plain SpecDistributionModulesAwsLoadBalancerController
+	type Plain SpecDistributionModulesIngressNginx
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAwsLoadBalancerController(plain)
+	*j = SpecDistributionModulesIngressNginx(plain)
 	return nil
 }

+type SpecDistributionModulesIngressOverridesIngresses struct {
+	// Forecastle corresponds to the JSON schema field "forecastle".
+	Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
+}
+
+// Override the common configuration with a particular configuration for the
+// Ingress module.
+type SpecDistributionModulesIngressOverrides struct {
+	// Ingresses corresponds to the JSON schema field "ingresses".
+	Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
+
+	// Set to override the node selector used to place the pods of the Ingress module.
+	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+
+	// Set to override the tolerations that will be added to the pods of the Ingress
+	// module.
+	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
+
+type SpecDistributionModulesIngress struct {
+	// The base domain used for all the KFD infrastructural ingresses. When using the
+	// nginx `dual` configuration type, this value should be the same as the
+	// `.spec.distribution.modules.ingress.dns.private.name` zone.
+	BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`
+
+	// Configuration for the cert-manager package. Required even if
+	// `ingress.nginx.type` is `none`; cert-manager is used for managing other
+	// certificates in the cluster besides the TLS termination certificates for the
+	// ingresses.
+	CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"`
+
+	// Dns corresponds to the JSON schema field "dns".
+	Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"`
+
+	// ExternalDns corresponds to the JSON schema field "externalDns".
+	ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"`
+
+	// Forecastle corresponds to the JSON schema field "forecastle".
+	Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
+
+	// Configuration for the Ingress nginx controller package.
+	Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["privateAccess"]; !ok || v == nil {
-		return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required")
+	if v, ok := raw["baseDomain"]; !ok || v == nil {
+		return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
 	}
-	if v, ok := raw["publicAccess"]; !ok || v == nil {
-		return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required")
+	if v, ok := raw["certManager"]; !ok || v == nil {
+		return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required")
 	}
-	type Plain SpecKubernetesAPIServer
+	if v, ok := raw["externalDns"]; !ok || v == nil {
+		return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required")
+	}
+	if v, ok := raw["nginx"]; !ok || v == nil {
+		return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+	}
+	type Plain SpecDistributionModulesIngress
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecKubernetesAPIServer(plain)
+	*j = SpecDistributionModulesIngress(plain)
 	return nil
 }

+// DEPRECATED since KFD v1.26.6, v1.27.5, and v1.28.0.
+type SpecDistributionModulesLoggingCerebro struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutput` objects that the Logging
+// Operator expects for forwarding the logs collected by the pre-defined flows.
+type SpecDistributionModulesLoggingCustomOutputs struct {
+	// This value defines where the output from the `audit` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition. Use the `nullout` output to
+	// discard the flow: `nullout: {}`
+	Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`
+
+	// This value defines where the output from the `errors` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition. Use the `nullout` output to
+	// discard the flow: `nullout: {}`
+	Errors string `json:"errors" yaml:"errors" mapstructure:"errors"`
+
+	// This value defines where the output from the `events` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition.
Use the `nullout` output to + // discard the flow: `nullout: {}` + Events string `json:"events" yaml:"events" mapstructure:"events"` + + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` + + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` + + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` + + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecKubernetesNodePoolSize - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") } - *j = SpecKubernetesNodePoolSize(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
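+// Editor's illustrative sketch (not part of the generated schema): a
+// `customOutputs` section that discards every pre-defined flow with the
+// `nullout` output, as the field documentation above suggests.
+func exampleNulloutCustomOutputs() SpecDistributionModulesLoggingCustomOutputs {
+	const discard = "nullout: {}"
+	return SpecDistributionModulesLoggingCustomOutputs{
+		Audit:         discard,
+		Errors:        discard,
+		Events:        discard,
+		Infra:         discard,
+		IngressNginx:  discard,
+		Kubernetes:    discard,
+		SystemdCommon: discard,
+		SystemdEtcd:   discard,
+	}
+}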
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecInfrastructureVpn + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpn(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } +type SpecDistributionModulesLoggingLokiBackend string + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") - } - type Plain SpecInfrastructureVpnSsh - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } +const ( + SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" +) + +// Configuration for Loki's external storage backend. +type SpecDistributionModulesLoggingLokiExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. 
+	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+	// External S3-compatible endpoint for Loki's storage.
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// If true, will use HTTP as protocol instead of HTTPS.
+	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+	// The secret access key (password) for the external S3-compatible bucket.
+	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+type TypesKubeResourcesLimits struct {
+	// The CPU limit for the Pod. Example: `1000m`.
+	Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+	// The memory limit for the Pod. Example: `1G`.
+	Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+}
+
+type TypesKubeResourcesRequests struct {
+	// The CPU request for the Pod, in cores. Example: `500m`.
+	Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+	// The memory request for the Pod. Example: `500M`.
+	Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+}
+
+type TypesKubeResources struct {
+	// Limits corresponds to the JSON schema field "limits".
+	Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
+
+	// Requests corresponds to the JSON schema field "requests".
+	Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"`
+}
+
+// Configuration for the Loki package.
+type SpecDistributionModulesLoggingLoki struct {
+	// The storage backend type for Loki. `minio` will use an in-cluster MinIO
+	// deployment for object storage, `externalEndpoint` can be used to point to an
+	// external object storage instead of deploying an in-cluster MinIO.
+	Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+	// Configuration for Loki's external storage backend.
+	ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+	// Resources corresponds to the JSON schema field "resources".
+	Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+	// Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the
+	// time series database it uses to store the logs from BoltDB to TSDB, and the
+	// schema from v11 to v13.
+	//
+	// The value of this field will determine the date when Loki will start writing
+	// using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB
+	// and schema will be kept until they expire for reading purposes.
+	//
+	// Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example:
+	// `2024-11-18`.
+	TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
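+// Editor's note (illustrative): `tsdbStartDate` is the only required Loki
+// field below. A hedged minimal example, assuming the date parses as an
+// `ISO 8601` `yyyy-mm-dd` string:
+//
+//	var loki SpecDistributionModulesLoggingLoki
+//	err := json.Unmarshal([]byte(`{"backend":"minio","tsdbStartDate":"2024-11-18"}`), &loki)
+//	// err == nil; omitting tsdbStartDate would fail with a "required" error.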
-func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { - return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") - } - if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { - return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") - } - if v, ok := raw["loadBalancerController"]; !ok || v == nil { - return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") - } - if v, ok := raw["overrides"]; !ok || v == nil { - return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecDistributionModulesAws + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAws(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", +type SpecDistributionModulesLoggingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Logging's MinIO deployment. +type SpecDistributionModulesLoggingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesLoggingOpensearchType string + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", } // UnmarshalJSON implements json.Unmarshaler. 
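+// Editor's illustrative sketch (hypothetical values): overriding the Logging
+// MinIO root credentials and per-disk PVC size documented above.
+func exampleLoggingMinio() *SpecDistributionModulesLoggingMinio {
+	user := "minio-admin"   // hypothetical; do not commit real credentials
+	pass := "change-me-now" // hypothetical
+	size := "20Gi"          // PVC size for each of the 6 MinIO disks
+	return &SpecDistributionModulesLoggingMinio{
+		RootUser:    &SpecDistributionModulesLoggingMinioRootUser{Username: &user, Password: &pass},
+		StorageSize: &size,
+	}
+}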
-func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } +const ( + SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" + SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" +) + +type SpecDistributionModulesLoggingOpensearch struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. + Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecInfrastructureVpc + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } +// Configuration for the Logging Operator. +type SpecDistributionModulesLoggingOperator struct { + // Overrides corresponds to the JSON schema field "overrides". 
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesLoggingType string
+
+var enumValues_SpecDistributionModulesLoggingType = []interface{}{
+	"none",
+	"opensearch",
+	"loki",
+	"customOutputs",
+}
+
+const (
+	SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none"
+	SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none"
+	SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch"
+	SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki"
+	SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs"
+)
+
+// Configuration for the Logging module.
+type SpecDistributionModulesLogging struct {
+	// Cerebro corresponds to the JSON schema field "cerebro".
+	Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
+
+	// CustomOutputs corresponds to the JSON schema field "customOutputs".
+	CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"`
+
+	// Loki corresponds to the JSON schema field "loki".
+	Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"`
+
+	// Minio corresponds to the JSON schema field "minio".
+	Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+	// Opensearch corresponds to the JSON schema field "opensearch".
+	Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"`
+
+	// Operator corresponds to the JSON schema field "operator".
+	Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
+	// OpenSearch cluster (can be single or triple for HA) where the logs will be
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+	// storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but
+	// without in-cluster storage; you will have to create the needed Outputs and
+	// ClusterOutputs to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`.
+	Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
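+// Editor's note (illustrative): the unmarshaller below only enforces `type`;
+// at the JSON schema level the chosen stack typically expects its companion
+// section too (e.g. `loki` with its `tsdbStartDate`). A hedged minimal
+// example:
+//
+//	var logging SpecDistributionModulesLogging
+//	err := json.Unmarshal([]byte(`{"type":"none"}`), &logging) // ok, centralized logging disabled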
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["cidr"]; !ok || v == nil {
-		return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required")
-	}
-	if v, ok := raw["subnetsCidrs"]; !ok || v == nil {
-		return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
 	}
-	type Plain SpecInfrastructureVpcNetwork
+	type Plain SpecDistributionModulesLogging
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecInfrastructureVpcNetwork(plain)
+	*j = SpecDistributionModulesLogging(plain)
 	return nil
 }

+type SpecDistributionModulesMonitoringAlertManager struct {
+	// The webhook URL where to send the dead man's switch monitoring, for example to
+	// use with healthchecks.io.
+	DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
+
+	// Set to false to avoid installing the Prometheus rules (alerts) included with
+	// the distribution.
+	InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
+
+	// The Slack webhook URL where to send the infrastructural and workload alerts to.
+	SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringBlackboxExporter struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+	// Setting this to true will deploy an additional `grafana-basic-auth` ingress
+	// protected with Grafana's basic auth instead of SSO. Its intended use is as a
+	// temporary ingress for when there are problems with the SSO login flow.
+	//
+	// Notice that by default anonymous access is enabled.
+	BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+	// role. Example:
+	//
+	// ```yaml
+	// usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+	// contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+	// 'Viewer'"
+	// ```
+	//
+	// More details in [Grafana's
+	// documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+	UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringMimirBackend string + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } +const ( + SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" +) + +// Configuration for Mimir's external storage backend. +type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The external S3-compatible endpoint for Mimir's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Mimir package. +type SpecDistributionModulesMonitoringMimir struct { + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Mimir's external storage backend. 
+	ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The retention time for the metrics stored in Mimir. Default is `30d`. Value
+	// must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y =
+	// 365 days.
+	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMinioRootUser struct {
+	// The password for the default MinIO root user.
+	Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+	// The username for the default MinIO root user.
+	Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Monitoring's MinIO deployment.
+type SpecDistributionModulesMonitoringMinio struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// RootUser corresponds to the JSON schema field "rootUser".
+	RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+	// The PVC size for each MinIO disk, 6 disks total.
+	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheus struct {
+	// Set this option to ship the collected metrics to a remote Prometheus receiver.
+	//
+	// `remoteWrite` is an array of objects that allows configuring the
+	// [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+	// Prometheus. The objects in the array follow [the same schema as in the
+	// prometheus
+	// operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+	RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+	// Resources corresponds to the JSON schema field "resources".
+	Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+	// The retention size for the `k8s` Prometheus instance.
+	RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
+
+	// The retention time for the `k8s` Prometheus instance.
+	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+
+	// The storage size for the `k8s` Prometheus instance.
+	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheusAgent struct {
+	// Set this option to ship the collected metrics to a remote Prometheus receiver.
+ // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringType string + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +const TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *Metadata) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") + return fmt.Errorf("field name in Metadata: required") } - type Plain SpecKubernetesNodePool + type Plain Metadata var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) + } + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) + } + *j = Metadata(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesNetworkingType(v) return nil } -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", +const ( + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// Configuration for the Monitoring module. 
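+// An illustrative configuration snippet for this module (a sketch with
+// placeholder values, not defaults):
+//
+// ```yaml
+// monitoring:
+//   type: mimir
+//   mimir:
+//     backend: minio
+//     retentionTime: 90d
+//   alertmanager:
+//     slackWebhookUrl: https://hooks.slack.com/services/EXAMPLE
+// ```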
+type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` + + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". + BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". + PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` + + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. + // + // - `none`: will disable the whole monitoring stack. + // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // the components of the cluster, Grafana and a series of dashboards to view the + // collected metrics, and more. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. + Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + + // X509Exporter corresponds to the JSON schema field "x509Exporter". + X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
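+// For example, a `monitoring` block that omits `type` is rejected here with the
+// error `field type in SpecDistributionModulesMonitoring: required`.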
-func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil +} + +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworkingType string + +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", +} + +const ( + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" +) + +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // Type corresponds to the JSON schema field "type". + Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +// Configuration for the Gatekeeper package. 
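+// An illustrative snippet (placeholder values):
+//
+// ```yaml
+// gatekeeper:
+//   enforcementAction: deny
+//   installDefaultPolicies: true
+//   additionalExcludedNamespaces:
+//     - not-enforced-namespace
+// ```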
+type SpecDistributionModulesPolicyGatekeeper struct {
+	// This parameter adds namespaces to Gatekeeper's exemption list, so it will not
+	// enforce the constraints on them.
+	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+	// The default enforcement action to use for the included constraints. `deny` will
+	// block the admission when violations to the policies are found, `warn` will show
+	// a message to the user but will admit the violating requests, and `dryrun` won't
+	// give any feedback to the user but will log the violations.
+	EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
+
+	// Set to `false` to avoid installing the default Gatekeeper policies (constraints
+	// templates and constraints) included with the distribution.
+	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["private"]; !ok || v == nil {
-		return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
+	if v, ok := raw["enforcementAction"]; !ok || v == nil {
+		return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
 	}
-	if v, ok := raw["public"]; !ok || v == nil {
-		return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
+	if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+		return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
 	}
-	type Plain SpecInfrastructureVpcNetworkSubnetsCidrs
+	type Plain SpecDistributionModulesPolicyGatekeeper
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain)
+	*j = SpecDistributionModulesPolicyGatekeeper(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["modules"]; !ok || v == nil {
-		return fmt.Errorf("field modules in SpecDistribution: required")
-	}
-	type Plain SpecDistribution
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistribution(plain)
-	return nil
+type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
+
+var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
+	"Audit",
+	"Enforce",
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
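+// Note that the match below is exact and case-sensitive: the capitalized values
+// `Audit` and `Enforce` (Kyverno's own spelling) are accepted, while variants
+// like `audit` or `enforce` are rejected as invalid.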
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+	var v string
+	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
-	if v, ok := raw["dr"]; !ok || v == nil {
-		return fmt.Errorf("field dr in SpecDistributionModules: required")
-	}
-	if v, ok := raw["ingress"]; !ok || v == nil {
-		return fmt.Errorf("field ingress in SpecDistributionModules: required")
-	}
-	if v, ok := raw["logging"]; !ok || v == nil {
-		return fmt.Errorf("field logging in SpecDistributionModules: required")
-	}
-	if v, ok := raw["policy"]; !ok || v == nil {
-		return fmt.Errorf("field policy in SpecDistributionModules: required")
+	var ok bool
+	for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+		if reflect.DeepEqual(v, expected) {
+			ok = true
+			break
+		}
 	}
-	type Plain SpecDistributionModules
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
 	}
-	*j = SpecDistributionModules(plain)
+	*j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
 	return nil
 }
 
-var enumValues_TypesAwsRegion = []interface{}{
-	"af-south-1",
-	"ap-east-1",
-	"ap-northeast-1",
-	"ap-northeast-2",
-	"ap-northeast-3",
-	"ap-south-1",
-	"ap-south-2",
-	"ap-southeast-1",
-	"ap-southeast-2",
-	"ap-southeast-3",
-	"ap-southeast-4",
-	"ca-central-1",
-	"eu-central-1",
-	"eu-central-2",
-	"eu-north-1",
-	"eu-south-1",
-	"eu-south-2",
-	"eu-west-1",
-	"eu-west-2",
-	"eu-west-3",
-	"me-central-1",
-	"me-south-1",
-	"sa-east-1",
-	"us-east-1",
-	"us-east-2",
-	"us-gov-east-1",
-	"us-gov-west-1",
-	"us-west-1",
-	"us-west-2",
+const (
+	SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit   SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
+	SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
+)
+
+// Configuration for the Kyverno package.
+type SpecDistributionModulesPolicyKyverno struct {
+	// This parameter adds namespaces to Kyverno's exemption list, so it will not
+	// enforce the policies on them.
+	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+	// Set to `false` to avoid installing the default Kyverno policies included with
+	// the distribution.
+	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The validation failure action to use for the policies: `Enforce` will block
+	// requests that do not comply with the policies, while `Audit` will log them
+	// without blocking.
+	ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
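+// An illustrative snippet for the struct above (placeholder values):
+//
+// ```yaml
+// kyverno:
+//   validationFailureAction: Audit
+//   installDefaultPolicies: true
+// ```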
-func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") - } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - type Plain SpecKubernetes + type Plain SpecDistributionModulesPolicyKyverno var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetes(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } +type SpecDistributionModulesPolicyType string + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesTracing - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesPolicyType(v) return nil } +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) + +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". + Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. 
+ // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Tracing's MinIO deployment. +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesAwsRegion { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = TypesAwsRegion(v) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) + +// Configuration for Tempo's external storage backend. +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. 
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The external S3-compatible endpoint for Tempo's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { var v string @@ -3168,1053 +2717,1888 @@ func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. 
+ Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecDistributionModulesTracing(plain) + return nil +} + +type SpecDistributionModules struct { + // Auth corresponds to the JSON schema field "auth". + Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` + + // Aws corresponds to the JSON schema field "aws". + Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` + + // Dr corresponds to the JSON schema field "dr". + Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` + + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` + + // Logging corresponds to the JSON schema field "logging". + Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` + + // Monitoring corresponds to the JSON schema field "monitoring". + Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` + + // Networking corresponds to the JSON schema field "networking". + Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + + // Policy corresponds to the JSON schema field "policy". + Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` + + // Tracing corresponds to the JSON schema field "tracing". + Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
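+// Note: as enforced below, `dr`, `ingress`, `logging`, and `policy` are the
+// required keys of `modules`, while `auth`, `aws`, `monitoring`, `networking`,
+// and `tracing` are optional.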
+func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+	if v, ok := raw["dr"]; !ok || v == nil {
+		return fmt.Errorf("field dr in SpecDistributionModules: required")
+	}
+	if v, ok := raw["ingress"]; !ok || v == nil {
+		return fmt.Errorf("field ingress in SpecDistributionModules: required")
+	}
+	if v, ok := raw["logging"]; !ok || v == nil {
+		return fmt.Errorf("field logging in SpecDistributionModules: required")
+	}
+	if v, ok := raw["policy"]; !ok || v == nil {
+		return fmt.Errorf("field policy in SpecDistributionModules: required")
+	}
+	type Plain SpecDistributionModules
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SpecDistributionModules(plain)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
-	"minio",
-	"externalEndpoint",
+type SpecDistribution struct {
+	// Common corresponds to the JSON schema field "common".
+	Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"`
+
+	// CustomPatches corresponds to the JSON schema field "customPatches".
+	CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"`
+
+	// Modules corresponds to the JSON schema field "modules".
+	Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
+	if v, ok := raw["modules"]; !ok || v == nil {
+		return fmt.Errorf("field modules in SpecDistribution: required")
 	}
-	type Plain SpecDistributionModulesPolicy
+	type Plain SpecDistribution
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesPolicy(plain)
+	*j = SpecDistribution(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesPolicyType {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
-	}
-	*j = SpecDistributionModulesPolicyType(v)
-	return nil
+type TypesCidr string
+
+// Network CIDRs configuration for private and public subnets.
+type SpecInfrastructureVpcNetworkSubnetsCidrs struct {
+	// The network CIDRs for the private subnets, where the nodes, the pods, and the
+	// private load balancers will be created.
+	Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"`
+
+	// The network CIDRs for the public subnets, where the public load balancers and
+	// the VPN servers will be created.
+	Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
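+// An illustrative example for the struct above (placeholder CIDRs):
+//
+// ```yaml
+// subnetsCidrs:
+//   private:
+//     - 10.0.0.0/20
+//   public:
+//     - 10.0.48.0/24
+// ```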
-func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } +type SpecInfrastructureVpcNetwork struct { + // The network CIDR for the VPC that will be created + Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` + + // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". + SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesLoggingOpensearch + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } +// Configuration for the VPC that will be created to host the EKS cluster and its +// related resources. If you already have a VPC that you want to use, leave this +// section empty and use `.spec.kubernetes.vpcId` instead. +type SpecInfrastructureVpc struct { + // Network corresponds to the JSON schema field "network". + Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecToolsConfigurationTerraformState + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) + *j = SpecInfrastructureVpc(plain) return nil } +type TypesAwsS3BucketNamePrefix string + +type TypesTcpPort int + +type SpecInfrastructureVpnSsh struct { + // The network CIDR enabled in the security group to access the VPN servers + // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. + AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` + + // List of GitHub usernames from whom get their SSH public key and add as + // authorized keys of the `operatorName` user. + GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` + + // **NOT IN USE**, use `githubUsersName` instead. This value defines the public + // keys that will be added to the bastion's operating system. + PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain SpecDistributionModulesPolicyKyverno + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyKyverno(plain) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } +type TypesAwsVpcId string + +// Configuration for the VPN server instances. +type SpecInfrastructureVpn struct { + // This value defines the prefix for the bucket name where the VPN servers will + // store their state (VPN certificates, users). + BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` + + // The `dhParamsBits` size used for the creation of the .pem file that will be + // used in the dh openvpn server.conf file. 
+	DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`
+
+	// The size of the disk in GB for each VPN server. Example: entering `50` will
+	// create disks of 50 GB.
+	DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`
+
+	// Overrides the IAM user name for the VPN. Default is to use the cluster name.
+	IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`
+
+	// The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+	// nomenclature. Example: `t3.micro`.
+	InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`
+
+	// The number of VPN server instances to create, `0` to skip the creation.
+	Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`
+
+	// The username of the account to create in the bastion's operating system.
+	OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`
+
+	// The port where each OpenVPN server will listen for connections.
+	Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
+
+	// Ssh corresponds to the JSON schema field "ssh".
+	Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
+
+	// The ID of the VPC where the VPN server instances will be created, required only
+	// if `.spec.infrastructure.vpc` is omitted.
+	VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
+
+	// The network CIDR that will be used to assign IP addresses to the VPN clients
+	// when connected.
+	VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["state"]; !ok || v == nil {
-		return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required")
+	if v, ok := raw["ssh"]; !ok || v == nil {
+		return fmt.Errorf("field ssh in SpecInfrastructureVpn: required")
 	}
-	type Plain SpecToolsConfigurationTerraform
+	if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil {
+		return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required")
+	}
+	type Plain SpecInfrastructureVpn
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecToolsConfigurationTerraform(plain)
+	*j = SpecInfrastructureVpn(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
-	}
-	*j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
-	return nil
+type SpecInfrastructure struct {
+	// Vpc corresponds to the JSON schema field "vpc".
+	Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"`
+
+	// Vpn corresponds to the JSON schema field "vpn".
+	Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"`
+}
+
+type SpecKubernetesAPIServer struct {
+	// This value defines if the Kubernetes API server will be accessible from the
+	// private subnets. Default is `true`.
+	PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`
+
+	// The network CIDRs from the private subnets that will be allowed to access the
+	// Kubernetes API server.
+	PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`
+
+	// This value defines if the Kubernetes API server will be accessible from the
+	// public subnets. Default is `false`.
+	PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`
+
+	// The network CIDRs from the public subnets that will be allowed to access the
+	// Kubernetes API server.
+	PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["terraform"]; !ok || v == nil {
-		return fmt.Errorf("field terraform in SpecToolsConfiguration: required")
+	if v, ok := raw["privateAccess"]; !ok || v == nil {
+		return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required")
 	}
-	type Plain SpecToolsConfiguration
+	if v, ok := raw["publicAccess"]; !ok || v == nil {
+		return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required")
+	}
+	type Plain SpecKubernetesAPIServer
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecToolsConfiguration(plain)
+	*j = SpecKubernetesAPIServer(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil +type SpecKubernetesAwsAuthRole struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` + + // Rolearn corresponds to the JSON schema field "rolearn". + Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"` + + // Username corresponds to the JSON schema field "username". + Username string `json:"username" yaml:"username" mapstructure:"username"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *Spec) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") } - type Plain Spec + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } +type SpecKubernetesAwsAuthUser struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` + + // Userarn corresponds to the JSON schema field "userarn". + Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"` + + // Username corresponds to the JSON schema field "username". + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + // UnmarshalJSON implements json.Unmarshaler. 
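+// An illustrative `awsAuth` users entry matching the struct above (placeholder
+// ARNs and names):
+//
+// ```yaml
+// awsAuth:
+//   users:
+//     - username: johndoe
+//       userarn: arn:aws:iam::123456789012:user/johndoe
+//       groups:
+//         - system:masters
+// ```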
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } +// Optional additional security configuration for EKS IAM via the `aws-auth` +// configmap. +// +// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html +type SpecKubernetesAwsAuth struct { + // This optional array defines additional AWS accounts that will be added to the + // `aws-auth` configmap. + AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` + + // This optional array defines additional IAM roles that will be added to the + // `aws-auth` configmap. + Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` + + // This optional array defines additional IAM users that will be added to the + // `aws-auth` configmap. + Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` +} + +type TypesAwsIamRoleNamePrefix string + +type SpecKubernetesLogRetentionDays int + +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, +} + // UnmarshalJSON implements json.Unmarshaler. 
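+// These allowed values mirror the retention periods accepted by Amazon
+// CloudWatch Logs: for example, `90` is valid, while `100` is rejected by the
+// unmarshaler below.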
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain TypesKubeToleration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = TypesKubeToleration(plain) + *j = SpecKubernetesLogRetentionDays(v) return nil } +type SpecKubernetesLogsTypesElem string + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecKubernetesLogsTypesElem(v) return nil } +const ( + SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api" + SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit" + SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator" + SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager" + SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" +) + +type SpecKubernetesNodePoolGlobalAmiType string + +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } +const ( + SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" + SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" +) + +// Port range for the Firewall Rule. +type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { + // From corresponds to the JSON schema field "from". + From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` + + // To corresponds to the JSON schema field "to". + To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionModulesMonitoring + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } +type TypesAwsIpProtocol string + +type TypesAwsTags map[string]string + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = TypesKubeTolerationOperator(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { + // CidrBlocks corresponds to the JSON schema field "cidrBlocks". + CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` + + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesDrVeleroEks - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - *j = SpecDistributionModulesDrVeleroEks(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesDrVelero + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } +type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
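Putting the pieces above together, a single CIDR-based firewall rule decodes like this. A sketch with made-up values, assuming `TypesCidr` and `TypesTcpPort` (defined elsewhere in this file) accept these literals; note that the parent `additionalFirewallRules.cidrBlocks` array is further constrained to exactly one item later in this patch.

```go
package main

import (
	"encoding/json"
	"fmt"

	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	raw := []byte(`{
		"name": "allow-https-out",
		"type": "egress",
		"cidrBlocks": ["0.0.0.0/0"],
		"protocol": "tcp",
		"ports": {"from": 443, "to": 443}
	}`)

	var rule public.SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock
	if err := json.Unmarshal(raw, &rule); err != nil {
		panic(err)
	}
	fmt.Println(rule.Name, rule.Type) // allow-https-out egress

	// An empty cidrBlocks array violates the minItems=1 check in the unmarshaler above.
	err := json.Unmarshal([]byte(`{"name":"x","type":"ingress","cidrBlocks":[],"protocol":"tcp","ports":{"from":1,"to":1}}`), &rule)
	fmt.Println(err) // field cidrBlocks length: must be >= 1
}
```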
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesDr - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecDistributionModulesDr(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { + // The name of the Firewall rule. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the Firewall rule. + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // If `true`, the source will be the security group itself. + Self bool `json:"self" yaml:"self" mapstructure:"self"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["hostedZoneId"]; !ok || v == nil { - return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecDistributionModulesIngressClusterIssuerRoute53 + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = TypesKubeTolerationEffect(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { + // The name for the additional Firewall rule Security Group. 
+ Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the Firewall rule. + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // The source security group ID. + SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } +// Optional additional firewall rules that will be attached to the nodes. +type SpecKubernetesNodePoolAdditionalFirewallRules struct { + // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. 
+ CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` + + // Self corresponds to the JSON schema field "self". + Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` + + // SourceSecurityGroupId corresponds to the JSON schema field + // "sourceSecurityGroupId". + SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + return nil +} + +type SpecKubernetesNodePoolAmiType string + +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecKubernetesNodePoolAmiType(v) return nil } +const ( + SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2" + SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023" +) + +// Configuration for customizing the Amazon Machine Image (AMI) for the machines of +// the Node Pool. +// +// The AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields +// for using a custom AMI (only with the `self-managed` node pool type) or by setting +// the `ami.type` field to one of the official AMIs based on Amazon Linux. +type SpecKubernetesNodePoolAmi struct { + // The ID of the AMI to use for the nodes, must be set together with the `owner` + // field. `ami.id` and `ami.owner` can only be set when Node Pool type is + // `self-managed` and they can't be set at the same time as `ami.type`.
+ Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` + + // The owner of the AMI to use for the nodes, must be set together with the `id` + // field. `ami.id` and `ami.owner` can only be set when Node Pool type is + // `self-managed` and they can't be set at the same time as `ami.type`. + Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"` + + // The AMI type defines the AMI to use for `eks-managed` and `self-managed` types + // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at + // the same time as `ami.id` and `ami.owner`. + Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecKubernetesNodePoolContainerRuntime string + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +const ( + SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" + SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" +) + +type SpecKubernetesNodePoolInstanceVolumeType string + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = TypesKubeTolerationEffect_1(v) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler.
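The `ami.id`/`ami.owner` versus `ami.type` exclusivity described above is enforced at the JSON Schema level, not by the Go structs themselves, so both shapes construct fine in Go. A sketch of the two variants (AMI ID and owner account are made up):

```go
package main

import (
	"fmt"

	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	// Variant 1: an official Amazon Linux AMI, usable by both pool types.
	amiType := public.SpecKubernetesNodePoolAmiTypeAlinux2023
	official := public.SpecKubernetesNodePoolAmi{Type: &amiType}

	// Variant 2: a custom AMI, only valid for `self-managed` node pools and
	// mutually exclusive with `ami.type` (a schema-level rule).
	id, owner := "ami-0123456789abcdef0", "123456789012"
	custom := public.SpecKubernetesNodePoolAmi{Id: &id, Owner: &owner}

	fmt.Println(*official.Type, *custom.Id, *custom.Owner)
}
```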
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["route53"]; !ok || v == nil { - return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) - return nil +const ( + SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" + SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" + SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" + SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" +) + +// Configuration for the instances that will be used in the node pool. +type SpecKubernetesNodePoolInstance struct { + // Set the maximum pods per node to a custom value. If not set, the EKS + // default value, which depends on the instance type, will be used. + // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt + MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` + + // If `true`, the nodes will be created as spot instances. Default is `false`. + Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` + + // The instance type to use for the nodes. + Type string `json:"type" yaml:"type" mapstructure:"type"` + + // The size of the disk in GB. + VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` + + // Volume type for the instance disk. Default is `gp2`. + VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler.
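A compact sketch of the instance block above, showing the only required field (`type`) plus a few of the optionals (all values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	raw := []byte(`{
		"type": "t3.large",
		"spot": true,
		"maxPods": 110,
		"volumeSize": 100,
		"volumeType": "gp3"
	}`)

	var inst public.SpecKubernetesNodePoolInstance
	if err := json.Unmarshal(raw, &inst); err != nil {
		panic(err)
	}
	fmt.Println(inst.Type, *inst.Spot, *inst.MaxPods) // t3.large true 110

	// "st1" is not in the volume type enum, so decoding fails.
	fmt.Println(json.Unmarshal([]byte(`{"type":"t3.large","volumeType":"st1"}`), &inst))
}
```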
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["vpcId"]; !ok || v == nil { - return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") - } - type Plain SpecDistributionModulesIngressDNSPrivate - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressDNSPrivate(plain) - return nil +type TypesKubeLabels_1 map[string]string + +type SpecKubernetesNodePoolSize struct { + // The maximum number of nodes in the node pool. + Max int `json:"max" yaml:"max" mapstructure:"max"` + + // The minimum number of nodes in the node pool. + Min int `json:"min" yaml:"min" mapstructure:"min"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", +type TypesAwsSubnetId string + +type TypesKubeTaints []string + +type SpecKubernetesNodePoolType string + +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = TypesKubeTolerationOperator_1(v) + *j = SpecKubernetesNodePoolType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecDistributionModulesIngressDNS: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecDistributionModulesIngressDNS: required") - } - type Plain SpecDistributionModulesIngressDNS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressDNS(plain) - return nil +const ( + SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" + SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" +) + +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. +type SpecKubernetesNodePool struct { + // AdditionalFirewallRules corresponds to the JSON schema field + // "additionalFirewallRules". + AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"` + + // Ami corresponds to the JSON schema field "ami". + Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` + + // This optional array defines additional target groups to attach to the instances + // in the node pool. + AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` + + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. + ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` + + // Instance corresponds to the JSON schema field "instance". + Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` + + // Kubernetes labels that will be added to the nodes. + Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` + + // The name of the node pool. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Size corresponds to the JSON schema field "size". + Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` + + // Optional list of subnet IDs in which to create the nodes. + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // AWS tags that will be added to the ASG and EC2 instances. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Kubernetes taints that will be added to the nodes. + Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` + + // The type of Node Pool. It can be `self-managed`, which allows customizations + // like custom AMIs and setting the maximum pods per node, or `eks-managed`, which + // uses prebuilt AMIs from Amazon selected via the `ami.type` field. It is + // recommended to use `self-managed`. + Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler.
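A minimal node pool carrying only the four required fields (`name`, `type`, `instance`, `size`); everything else above is optional. The unmarshaler that follows enforces those required keys. Names and sizes are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	raw := []byte(`{
		"name": "workers",
		"type": "self-managed",
		"instance": {"type": "t3.xlarge"},
		"size": {"min": 1, "max": 3}
	}`)

	var pool public.SpecKubernetesNodePool
	if err := json.Unmarshal(raw, &pool); err != nil {
		panic(err)
	}
	fmt.Println(pool.Name, pool.Size.Min, pool.Size.Max) // workers 1 3
}
```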
-func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") } - if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") } - type Plain SpecDistributionModulesIngressExternalDNS + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressExternalDNS(plain) + *j = SpecKubernetesNodePool(plain) return nil } +type SpecKubernetesNodePoolsLaunchKind string + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } +const ( + SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" + SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" + SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" +) + +// Defines the Kubernetes components configuration and the values needed for the +// `kubernetes` phase of furyctl. +type SpecKubernetes struct { + // ApiServer corresponds to the JSON schema field "apiServer". + ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` + + // AwsAuth corresponds to the JSON schema field "awsAuth". + AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` + + // Overrides the default prefix for the IAM role name of the EKS cluster. If not + // set, a name will be generated from the cluster name. 
+ ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` + + // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. + // Setting the value to zero (`0`) makes retention last forever. Default is `90` + // days. + LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` + + // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. + LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` + + // The SSH public key that can be used to connect to the nodes via SSH as the + // `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file. + NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` + + // Global default AMI type used for EKS worker nodes. This will apply to all node + // pools unless overridden by a specific node pool. + NodePoolGlobalAmiType SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType" yaml:"nodePoolGlobalAmiType" mapstructure:"nodePoolGlobalAmiType"` + + // NodePools corresponds to the JSON schema field "nodePools". + NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` + + // Accepted values are `launch_configurations`, `launch_templates` or `both`. For + // new clusters use `launch_templates`; for adopting an existing cluster you'll + // need to migrate from `launch_configurations` to `launch_templates` using `both` + // as an interim step. + NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` + + // This value defines the network CIDR that will be used to assign IP addresses to + // Kubernetes services. + ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` + + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // IDs of the subnets where the EKS cluster will be created. + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the VPC where the EKS cluster and its related resources will be created. + VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` + + // Overrides the default prefix for the IAM role name of the EKS workers. If not + // set, a name will be generated from the cluster name. + WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler.
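The `SpecKubernetes` unmarshaler below rejects any document missing one of the five required keys; the checks fire in declaration order, so an empty object reports the first one. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	var k public.SpecKubernetes
	// apiServer is the first required key checked, so it is reported first.
	fmt.Println(json.Unmarshal([]byte(`{}`), &k))
	// Output: field apiServer in SpecKubernetes: required

	// For new clusters the recommended launch kind is launch templates;
	// `both` exists only as a migration step off launch configurations.
	kind := public.SpecKubernetesNodePoolsLaunchKindLaunchTemplates
	fmt.Println(kind) // launch_templates
}
```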
-func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + } + if v, ok := raw["nodePoolGlobalAmiType"]; !ok || v == nil { + return fmt.Errorf("field nodePoolGlobalAmiType in SpecKubernetes: required") + } + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") + } + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + } + type Plain SpecKubernetes + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetes(plain) + return nil +} + +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain TypesKubeToleration_1 + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". 
+ Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` +} + +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +} + +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` +} + +type TypesAwsS3KeyPrefix string + +// Configuration for the S3 bucket used to store the Terraform state. +type SpecToolsConfigurationTerraformStateS3 struct { + // This value defines which bucket will be used to store all the states. + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + + // This value defines which folder will be used to store all the states inside the + // bucket. + KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` + + // This value defines in which region the bucket is located. + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This value defines if the region of the bucket should be validated or not by + // Terraform, useful when using a bucket in a recently added region. + SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
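A sketch of a plugins block using the Helm types above; the repository URL, chart, version, and set values are all hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"

	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	raw := []byte(`{
		"helm": {
			"repositories": [
				{"name": "example", "url": "https://charts.example.com"}
			],
			"releases": [{
				"name": "my-app",
				"namespace": "my-app",
				"chart": "example/my-app",
				"version": "1.2.3",
				"set": [{"name": "replicaCount", "value": "2"}]
			}]
		}
	}`)

	var plugins public.SpecPlugins
	if err := json.Unmarshal(raw, &plugins); err != nil {
		panic(err)
	}
	fmt.Println(plugins.Helm.Releases[0].Name) // my-app
}
```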
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecToolsConfigurationTerraformStateS3 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +// Configuration for storing the Terraform state of the cluster. +type SpecToolsConfigurationTerraformState struct { + // S3 corresponds to the JSON schema field "s3". + S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - type Plain SpecDistributionModulesIngressNginxTLS + type Plain SpecToolsConfigurationTerraformState var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecToolsConfigurationTerraformState(plain) return nil } +type SpecToolsConfigurationTerraform struct { + // State corresponds to the JSON schema field "state". + State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` +} + // UnmarshalJSON implements json.Unmarshaler. 
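And the tools configuration above, fully populated down to the S3 state bucket; bucket name, key prefix, and region are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"

	public "github.com/sighupio/fury-distribution/pkg/apis/ekscluster/v1alpha2/public"
)

func main() {
	raw := []byte(`{
		"terraform": {
			"state": {
				"s3": {
					"bucketName": "example-furyctl-state",
					"keyPrefix": "cluster-1",
					"region": "eu-west-1"
				}
			}
		}
	}`)

	var tools public.SpecToolsConfiguration
	if err := json.Unmarshal(raw, &tools); err != nil {
		panic(err)
	}
	fmt.Println(tools.Terraform.State.S3.BucketName) // example-furyctl-state
}
```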
-func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + type Plain SpecToolsConfigurationTerraform var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) - } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecToolsConfigurationTerraform(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil +type SpecToolsConfiguration struct { + // Terraform corresponds to the JSON schema field "terraform". + Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecToolsConfiguration(plain) return nil } +type Spec struct { + // Distribution corresponds to the JSON schema field "distribution". + Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` + + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. + DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` + + // Infrastructure corresponds to the JSON schema field "infrastructure". 
+ Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"` + + // Kubernetes corresponds to the JSON schema field "kubernetes". + Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // Plugins corresponds to the JSON schema field "plugins". + Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` + + // Defines in which AWS region the cluster and all the related resources will be + // created. + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This map defines the common tags that will be added to all the resources + // created on AWS. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Configuration for tools used by furyctl, like Terraform. + ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - if v, ok := raw["certManager"]; !ok || v == nil { - return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") } - if v, ok := raw["dns"]; !ok || v == nil { - return fmt.Errorf("field dns in SpecDistributionModulesIngress: required") + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") } - if v, ok := raw["externalDns"]; !ok || v == nil { - return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") } - type Plain SpecDistributionModulesIngress + type Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem
map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} + +// override default routes for KFD components +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { + // GatekeeperPolicyManager corresponds to the JSON schema field + // "gatekeeperPolicyManager". + GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` + + // HubbleUi corresponds to the JSON schema field "hubbleUi". + HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + + // IngressNgnixForecastle corresponds to the JSON schema field + // "ingressNgnixForecastle". + IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` + + // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". + LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` + + // LoggingOpensearchDashboards corresponds to the JSON schema field + // "loggingOpensearchDashboards". + LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` + + // MonitoringAlertmanager corresponds to the JSON schema field + // "monitoringAlertmanager". + MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` + + // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". + MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` + + // MonitoringMinioConsole corresponds to the JSON schema field + // "monitoringMinioConsole". + MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` + + // MonitoringPrometheus corresponds to the JSON schema field + // "monitoringPrometheus". + MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + + // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". 
+ TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` +} + +// Pomerium needs some user-provided secrets to be fully configured. These secrets +// should be unique between clusters. +type SpecDistributionModulesAuthPomeriumSecrets struct { + // Cookie Secret is the secret used to encrypt and sign session cookies. + // + // To generate a random key, run the following command: `head -c32 /dev/urandom | + // base64` + COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` + + // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth + // type is SSO, this value will be the secret used to authenticate Pomerium with + // Dex, **use a strong random value**. + IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` + + // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate + // requests between Pomerium services. It's critical that secret keys are random, + // and stored safely. + // + // To generate a key, run the following command: `head -c32 /dev/urandom | base64` + SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` + + // Signing Key is the base64 representation of one or more PEM-encoded private + // keys used to sign a user's attestation JWT, which can be consumed by upstream + // applications to pass along identifying user information like username, id, and + // groups. + // + // To generate a P-256 (ES256) signing key: + // + // ```bash + // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem + // # careful! this will output your private key in terminal + // cat ec_private.pem | base64 + // ``` + SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` +} + // UnmarshalJSON implements json.Unmarshaler.
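For reference, the four secrets above map to the `spec.distribution.modules.auth.pomerium.secrets` block of the furyctl configuration file. A minimal sketch, assuming SSO is enabled; every value is a placeholder to be replaced with the output of the generation commands documented in the comments:

```yaml
spec:
  distribution:
    modules:
      auth:
        pomerium:
          secrets:
            # head -c32 /dev/urandom | base64
            COOKIE_SECRET: <32-byte-random-base64>
            # must match the client secret configured for Pomerium in Dex
            IDP_CLIENT_SECRET: <strong-random-value>
            # head -c32 /dev/urandom | base64
            SHARED_SECRET: <32-byte-random-base64>
            # base64 of a PEM-encoded EC (P-256) private key
            SIGNING_KEY: <base64-pem-ec-key>
```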
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecDistributionModulesLogging(plain) + *j = TypesKubeTolerationEffect_1(v) return nil } +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = TypesKubeTolerationOperator_1(v) return nil } +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - type Plain SpecDistributionCommonProvider + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCommonProvider(plain) + *j = TypesKubeToleration_1(plain) return nil } +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} + +// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. +type SpecDistributionModulesAuthPomerium_2 struct { + // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". + DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // DEPRECATED: Use defaultRoutesPolicy and/or routes + Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` + + // Additional routes configuration for Pomerium. Follows Pomerium's route format: + // https://www.pomerium.com/docs/reference/routes + Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` + + // Secrets corresponds to the JSON schema field "secrets". + Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +} + // UnmarshalJSON implements json.Unmarshaler. 
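To make the `routes` and `overrides` fields above concrete, a hedged sketch following Pomerium's route format (the hostname, upstream service address, and domain are hypothetical, and the policy block follows Pomerium's policy language as documented upstream):

```yaml
spec:
  distribution:
    modules:
      auth:
        pomerium:
          overrides:
            nodeSelector:
              node.kubernetes.io/role: infra
          # follows https://www.pomerium.com/docs/reference/routes
          routes:
            - from: https://myservice.example.tld
              to: http://myservice.mynamespace.svc.cluster.local:8080
              policy:
                - allow:
                    or:
                      - domain:
                          is: example.tld
```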
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + type Plain SpecDistributionModulesAuthPomerium_2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } +type TypesAwsSshPubKey string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesUri string + +type EksclusterKfdV1Alpha2Kind string + +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { var v string @@ -4235,48 +4619,21 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) - } - *j = SpecDistributionModulesLoggingOpensearchType(v) - return nil -} +const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" -// UnmarshalJSON implements json.Unmarshaler. -func (j *Metadata) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in Metadata: required") - } - type Plain Metadata - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.Name) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "name", 1) - } - if len(plain.Name) > 56 { - return fmt.Errorf("field %s length: must be <= %d", "name", 56) - } - *j = Metadata(plain) - return nil +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). +type EksclusterKfdV1Alpha2 struct { + // ApiVersion corresponds to the JSON schema field "apiVersion". + ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` + + // Kind corresponds to the JSON schema field "kind". + Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` + + // Metadata corresponds to the JSON schema field "metadata". + Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` + + // Spec corresponds to the JSON schema field "spec". + Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` } // UnmarshalJSON implements json.Unmarshaler. 
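Assembling the top-level types above, a minimal `EKSCluster` configuration file might look like the following sketch. Values are illustrative, and the exact sub-fields of `toolsConfiguration` (here, a Terraform S3 state backend) are an assumption not shown in this diff:

```yaml
apiVersion: kfd.sighup.io/v1alpha2
kind: EKSCluster
metadata:
  name: my-cluster # must be between 1 and 56 characters
spec:
  distributionVersion: v1.28.5
  region: eu-west-1
  toolsConfiguration:
    terraform:
      state:
        s3:
          bucketName: my-terraform-state-bucket # assumed field names
          keyPrefix: furyctl
          region: eu-west-1
  distribution:
    modules: {} # module configuration omitted
```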
diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index adaa80c83..f63a2cc65 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -6,9 +6,11 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). type EksclusterKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -28,7 +30,8 @@ type EksclusterKfdV1Alpha2Kind string const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -36,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Infrastructure corresponds to the JSON schema field "infrastructure". @@ -48,14 +53,15 @@ type Spec struct { // Plugins corresponds to the JSON schema field "plugins". Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` - // Region corresponds to the JSON schema field "region". + // Defines in which AWS region the cluster and all the related resources will be + // created. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This map defines which will be the common tags that will be added to all the // resources created on AWS. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". + // Configuration for tools used by furyctl, like Terraform. ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } @@ -70,29 +76,38 @@ type SpecDistribution struct { Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. 
- // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). // // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -294,8 +309,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -311,11 +329,25 @@ type SpecDistributionModulesAuth struct { Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` } +// Configuration for the Dex package. type SpecDistributionModulesAuthDex struct { - // The additional static clients for dex + // Additional static clients definitions that will be added to the default clients + // included with the distribution in Dex's configuration. Example: + // + // ```yaml + // additionalStaticClients: + // - id: my-custom-client + // name: "A custom additional static client" + // redirectURIs: + // - "https://myapp.tld/redirect" + // - "https://alias.tld/oidc-callback" + // secret: supersecretpassword + // ``` + // Reference: https://dexidp.io/docs/connectors/local/ AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` - // The connectors for dex + // A list with each item defining a Dex connector. Follows Dex connectors + // configuration format: https://dexidp.io/docs/connectors/ Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` // Expiry corresponds to the JSON schema field "expiry". @@ -333,25 +365,29 @@ type SpecDistributionModulesAuthDexExpiry struct { SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` } +// Override the common configuration with a particular configuration for the Auth +// module.
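As a concrete counterpart to the `connectors` field above, a sketch of a single GitHub connector following Dex's connector configuration format; the OAuth app credentials, callback URL, and organization name are placeholders:

```yaml
spec:
  distribution:
    modules:
      auth:
        dex:
          # https://dexidp.io/docs/connectors/github/
          connectors:
            - type: github
              id: github
              name: GitHub
              config:
                clientID: <github-oauth-app-client-id>
                clientSecret: <github-oauth-app-client-secret>
                redirectURI: https://login.example.tld/callback
                orgs:
                  - name: my-org
```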
type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". + // Override the definition of the Auth module ingresses. Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the auth module + // Set to override the node selector used to place the pods of the Auth module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -476,15 +512,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -535,11 +579,16 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***eks*** + // The type of the Disaster Recovery, must be `none` or `eks`. 
`none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -559,38 +608,98 @@ type SpecDistributionModulesDrVelero struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Configuration for Velero's backup schedules. + Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` } type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } +// Configuration for Velero's backup schedules. +type SpecDistributionModulesDrVeleroSchedules struct { + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` + + // Whether or not to install the default `manifests` and `full` backup schedules. + // Default is `true`. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's full backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's full backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // EXPERIMENTAL (if you do more than one backup, the backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups; prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for Velero's manifests backup schedule.
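A sketch of how the schedule types above combine under the `dr` module; the cron expressions and TTL shown are the documented defaults, and the bucket name is a placeholder:

```yaml
spec:
  distribution:
    modules:
      dr:
        type: eks
        velero:
          eks:
            bucketName: my-velero-backups # placeholder
            region: eu-west-1
          schedules:
            install: true
            definitions:
              manifests:
                schedule: "*/15 * * * *"
                ttl: 720h0m0s
              full:
                schedule: "0 1 * * *"
                ttl: 720h0m0s
                snapshotMoveData: false # EXPERIMENTAL when enabled, see note above
```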
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { + // The cron expression for the `manifests` backup schedule (default `*/15 * * * + // *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups; prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD infrastructural ingresses. If in the nginx + // `dual` configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`; cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Dns corresponds to the JSON schema field "dns". - Dns SpecDistributionModulesIngressDNS `json:"dns" yaml:"dns" mapstructure:"dns"` + Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"` // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`; cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -599,17 +708,23 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process.
Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // The name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert-manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***dns01*** or ***http01*** + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -620,30 +735,36 @@ const ( SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) +// DNS definition, used in conjunction with the `externalDNS` package to automate DNS +// management and certificate emission. type SpecDistributionModulesIngressDNS struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` // Private corresponds to the JSON schema field "private". - Private SpecDistributionModulesIngressDNSPrivate `json:"private" yaml:"private" mapstructure:"private"` + Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"` // Public corresponds to the JSON schema field "public". - Public SpecDistributionModulesIngressDNSPublic `json:"public" yaml:"public" mapstructure:"public"` + Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` } +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the private hosted zone + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. Name string `json:"name" yaml:"name" mapstructure:"name"` } type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the public hosted zone + // The name of the public hosted zone. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -659,14 +780,24 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls".
Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. + // - `single`: a single ingress controller with ingress class `nginx` will be + // installed to manage all the ingress resources, infrastructural ingresses will + // be created. + // - `dual`: two independent ingress controllers will be installed, one for the + // `internal` ingress class intended for private ingresses and one for the + // `external` ingress class intended for public ingresses. KFD infrastructural + // ingresses will use the `internal` ingress class when using the dual type. + // + // Default is `single`. Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } type SpecDistributionModulesIngressNginxTLS struct { - // The provider of the TLS certificate, must be ***none***, ***certManager*** or - // ***secret*** + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` // Secret corresponds to the JSON schema field "secret". @@ -681,15 +812,18 @@ const ( SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" ) +// Kubernetes TLS secret for the ingresses TLS certificate. type SpecDistributionModulesIngressNginxTLSSecret struct { - // Ca corresponds to the JSON schema field "ca". + // The Certificate Authority certificate file's content. You can use the + // `"{file://<path>}"` notation to get the content from a file. Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - // The certificate file content or you can use the file notation to get the - // content from a file + // The certificate file's content. You can use the `"{file://<path>}"` notation to + // get the content from a file. Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - // Key corresponds to the JSON schema field "key". + // The signing key file's content. You can use the `"{file://<path>}"` notation to + // get the content from a file. Key string `json:"key" yaml:"key" mapstructure:"key"` } @@ -701,14 +835,17 @@ const ( SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) +// Override the common configuration with a particular configuration for the +// Ingress module. type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the ingress module + // Set to override the tolerations that will be added to the pods of the Ingress + // module.
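For the `secret` TLS provider defined above, a sketch using the file notation to load the certificate material; the paths are hypothetical:

```yaml
spec:
  distribution:
    modules:
      ingress:
        nginx:
          type: single
          tls:
            provider: secret
            secret:
              ca: "{file://./certs/ca.crt}"
              cert: "{file://./certs/tls.crt}"
              key: "{file://./certs/tls.key}"
```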
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -717,6 +854,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct { Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` } +// Configuration for the Logging module. type SpecDistributionModulesLogging struct { // Cerebro corresponds to the JSON schema field "cerebro". Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` @@ -739,83 +877,104 @@ type SpecDistributionModulesLogging struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // selects the logging stack. Choosing none will disable the centralized logging. - // Choosing opensearch will deploy and configure the Logging Operator and an + // Selects the logging stack. Options are: + // - `none`: will disable the centralized logging. + // - `opensearch`: will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh - // for storage. Choosing customOuput the Logging Operator will be deployed and - // installed but with no local storage, you will have to create the needed Outputs - // and ClusterOutputs to ship the logs to your desired storage. + // stored. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOutputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage; you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } +// DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition.
Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
+ // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, while `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database it uses to store the logs from BoltDB to TSDB, and the + // schema from v11 to v13. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } type SpecDistributionModulesLoggingLokiBackend string @@ -825,23 +984,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage.
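An example of the Loki settings above with the TSDB migration date set; the date is the one used as an example in the schema, and the in-cluster MinIO backend is assumed:

```yaml
spec:
  distribution:
    modules:
      logging:
        type: loki
        loki:
          backend: minio
          # Loki starts writing with TSDB and schema v13 at midnight UTC of this
          # date; the old BoltDB store is kept for reads until it expires.
          tsdbStartDate: "2024-11-18"
```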
BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -849,15 +1010,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -868,10 +1029,12 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -882,6 +1045,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -896,7 +1060,7 @@ const ( SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components +// Configuration for the Monitoring module. type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` @@ -925,22 +1089,23 @@ type SpecDistributionModulesMonitoring struct { // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. // // - `none`: will disable the whole monitoring stack. // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -948,14 +1113,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. 
InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL to send the infrastructural and workload alerts to. SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -994,17 +1160,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Mimir package. type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, while `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Mimir's external storage backend. ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the mimir pods + // The retention time for the metrics stored in Mimir. Default is `30d`. Value must + // match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 + // days. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1015,23 +1186,25 @@ const ( SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" ) +// Configuration for Mimir's external storage backend. type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external mimir backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external mimir backend + // The external S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external mimir backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external mimir backend + // The secret access key (password) for the external S3-compatible bucket.
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Monitoring's MinIO deployment. type SpecDistributionModulesMonitoringMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1039,15 +1212,15 @@ type SpecDistributionModulesMonitoringMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -1064,13 +1237,13 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1106,9 +1279,10 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Networking module. type SpecDistributionModulesNetworking struct { // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` @@ -1119,6 +1293,7 @@ type SpecDistributionModulesNetworkingTigeraOperator struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Policy module. 
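+//
+// A hypothetical example of the Policy module configuration in the furyctl
+// YAML file:
+//
+//   policy:
+//     type: gatekeeper
+//     gatekeeper:
+//       enforcementAction: deny
+//       installDefaultPolicies: true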
type SpecDistributionModulesPolicy struct {
// Gatekeeper corresponds to the JSON schema field "gatekeeper".
Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`

@@ -1129,20 +1304,27 @@ type SpecDistributionModulesPolicy struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

- // The type of security to use, either ***none***, ***gatekeeper*** or
- // ***kyverno***
+ // The type of policy enforcement to use, either `none`, `gatekeeper` or
+ // `kyverno`.
+ //
+ // Default is `none`.
Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
}

+// Configuration for the Gatekeeper package.
type SpecDistributionModulesPolicyGatekeeper struct {
// This parameter adds namespaces to Gatekeeper's exemption list, so it will not
// enforce the constraints on them.
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`

- // The enforcement action to use for the gatekeeper module
+ // The default enforcement action to use for the included constraints. `deny` will
+ // block the admission when violations of the policies are found, `warn` will show
+ // a message to the user but will admit the violating requests, and `dryrun` won't
+ // give any feedback to the user but will log the violations.
EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`

- // If true, the default policies will be installed
+ // Set to `false` to avoid installing the default Gatekeeper policies (constraint
+ // templates and constraints) included with the distribution.
InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`

// Overrides corresponds to the JSON schema field "overrides".
@@ -1157,26 +1339,30 @@ const (
SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
)

+// Configuration for the Kyverno package.
type SpecDistributionModulesPolicyKyverno struct {
// This parameter adds namespaces to Kyverno's exemption list, so it will not
- // enforce the constraints on them.
+ // enforce the policies on them.
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`

- // If true, the default policies will be installed
+ // Set to `false` to avoid installing the default Kyverno policies included with
+ // the distribution.
InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`

// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

- // The validation failure action to use for the kyverno module
+ // The validation failure action to use for the policies: `Enforce` will block
+ // requests that do not comply with the policies, while `Audit` will only log
+ // them without blocking.
ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } type SpecDistributionModulesPolicyKyvernoValidationFailureAction string const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "enforce" + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" ) type SpecDistributionModulesPolicyType string @@ -1187,6 +1373,7 @@ const ( SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" ) +// Configuration for the Tracing module. type SpecDistributionModulesTracing struct { // Minio corresponds to the JSON schema field "minio". Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` @@ -1197,10 +1384,14 @@ type SpecDistributionModulesTracing struct { // Tempo corresponds to the JSON schema field "tempo". Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1208,29 +1399,32 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for the Tempo package. type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. 
Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Tempo's external storage backend. ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the tempo pods + // The retention time for the traces stored in Tempo. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1241,20 +1435,21 @@ const ( SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" ) +// Configuration for Tempo's external storage backend. type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // The external S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1266,88 +1461,98 @@ const ( ) type SpecInfrastructure struct { - // This key defines the VPC that will be created in AWS + // Vpc corresponds to the JSON schema field "vpc". Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` - // This section defines the creation of VPN bastions + // Vpn corresponds to the JSON schema field "vpn". Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` } +// Configuration for the VPC that will be created to host the EKS cluster and its +// related resources. If you already have a VPC that you want to use, leave this +// section empty and use `.spec.kubernetes.vpcId` instead. type SpecInfrastructureVpc struct { // Network corresponds to the JSON schema field "network". Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` } type SpecInfrastructureVpcNetwork struct { - // This is the CIDR of the VPC that will be created + // The network CIDR for the VPC that will be created Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". 
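+ //
+ // A sketch of a possible VPC network layout (the CIDRs are examples only):
+ //
+ //   network:
+ //     cidr: 10.0.0.0/16
+ //     subnetsCidrs:
+ //       private:
+ //         - 10.0.0.0/20
+ //       public:
+ //         - 10.0.192.0/24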
SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"`
}

+// Network CIDRs configuration for private and public subnets.
type SpecInfrastructureVpcNetworkSubnetsCidrs struct {
- // These are the CIRDs for the private subnets, where the nodes, the pods, and the
+ // The network CIDRs for the private subnets, where the nodes, the pods, and the
// private load balancers will be created
Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"`

- // These are the CIDRs for the public subnets, where the public load balancers and
+ // The network CIDRs for the public subnets, where the public load balancers and
// the VPN servers will be created
Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"`
}

+// Configuration for the VPN server instances.
type SpecInfrastructureVpn struct {
- // This value defines the prefix that will be used to create the bucket name where
- // the VPN servers will store the states
+ // This value defines the prefix for the bucket name where the VPN servers will
+ // store their state (VPN certificates, users).
BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"`

- // The dhParamsBits size used for the creation of the .pem file that will be used
- // in the dh openvpn server.conf file
+ // The `dhParamsBits` size used for the creation of the .pem file that will be
+ // used in the dh openvpn server.conf file.
DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`

- // The size of the disk in GB
+ // The size of the disk in GB for each VPN server. Example: entering `50` will
+ // create disks of 50 GB.
DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`

- // Overrides the default IAM user name for the VPN
+ // Overrides the IAM user name for the VPN. Default is to use the cluster name.
IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`

- // The size of the AWS EC2 instance
+ // The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+ // nomenclature. Example: `t3.micro`.
InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`

- // The number of instances to create, 0 to skip the creation
+ // The number of VPN server instances to create, `0` to skip the creation.
Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`

- // The username of the account to create in the bastion's operating system
+ // The username of the account to create in the bastion's operating system.
OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`

- // The port used by the OpenVPN server
+ // The port where each OpenVPN server will listen for connections.
Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`

// Ssh corresponds to the JSON schema field "ssh".
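+ //
+ // A hypothetical SSH access configuration for the VPN servers (the GitHub
+ // handle is a placeholder):
+ //
+ //   ssh:
+ //     allowedFromCidrs:
+ //       - 0.0.0.0/0
+ //     githubUsersName:
+ //       - <your-github-username>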
Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`

- // The VPC ID where the VPN servers will be created, required only if
- // .spec.infrastructure.vpc is omitted
+ // The ID of the VPC where the VPN server instances will be created, required only
+ // if `.spec.infrastructure.vpc` is omitted.
VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`

- // The CIDR that will be used to assign IP addresses to the VPN clients when
- // connected
+ // The network CIDR that will be used to assign IP addresses to the VPN clients
+ // when connected.
VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"`
}

type SpecInfrastructureVpnSsh struct {
- // The CIDR enabled in the security group that can access the bastions in SSH
+ // The network CIDR enabled in the security group to access the VPN servers
+ // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.
AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"`

- // The github user name list that will be used to get the ssh public key that will
- // be added as authorized key to the operatorName user
+ // List of GitHub usernames whose SSH public keys will be fetched and added as
+ // authorized keys of the `operatorName` user.
GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"`

- // This value defines the public keys that will be added to the bastion's
- // operating system NOTES: Not yet implemented
+ // **NOT IN USE**, use `githubUsersName` instead. This value defines the public
+ // keys that will be added to the bastion's operating system.
PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"`
}

+// Defines the Kubernetes components configuration and the values needed for the
+// `kubernetes` phase of furyctl.
type SpecKubernetes struct {
// ApiServer corresponds to the JSON schema field "apiServer".
ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"`

@@ -1355,71 +1560,85 @@ type SpecKubernetes struct {
// AwsAuth corresponds to the JSON schema field "awsAuth".
AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"`

- // Overrides the default IAM role name prefix for the EKS cluster
+ // Overrides the default prefix for the IAM role name of the EKS cluster. If not
+ // set, a name will be generated from the cluster name.
ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"`

- // Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
- LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
+ // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days.
+ // Setting the value to zero (`0`) makes retention last forever. Default is `90`
+ // days.
+ LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`

// Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
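+ //
+ // For example, to collect only the API server and audit logs:
+ //
+ //   logsTypes:
+ //     - api
+ //     - audit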
LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"`

- // This key contains the ssh public key that can connect to the nodes via SSH
- // using the ec2-user user
+ // The SSH public key that can connect to the nodes via SSH using the `ec2-user`
+ // user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"`

+ // Global default AMI type used for EKS worker nodes. This will apply to all node
+ // pools unless overridden by a specific node pool.
+ NodePoolGlobalAmiType SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType" yaml:"nodePoolGlobalAmiType" mapstructure:"nodePoolGlobalAmiType"`
+
// NodePools corresponds to the JSON schema field "nodePools".
NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"`

- // Either `launch_configurations`, `launch_templates` or `both`. For new clusters
- // use `launch_templates`, for existing cluster you'll need to migrate from
- // `launch_configurations` to `launch_templates` using `both` as interim.
+ // Accepted values are `launch_configurations`, `launch_templates` or `both`. For
+ // new clusters use `launch_templates`, for adopting an existing cluster you'll
+ // need to migrate from `launch_configurations` to `launch_templates` using `both`
+ // as an interim step.
NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"`

- // This value defines the CIDR that will be used to assign IP addresses to the
- // services
+ // This value defines the network CIDR that will be used to assign IP addresses to
+ // Kubernetes services.
ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"`

- // This value defines the subnet IDs where the EKS cluster will be created,
- // required only if .spec.infrastructure.vpc is omitted
+ // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+ // IDs of the subnets where the EKS cluster will be created.
SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`

- // This value defines the VPC ID where the EKS cluster will be created, required
- // only if .spec.infrastructure.vpc is omitted
+ // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+ // ID of the VPC where the EKS cluster and its related resources will be created.
VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`

- // Overrides the default IAM role name prefix for the EKS workers
+ // Overrides the default prefix for the IAM role name of the EKS workers. If not
+ // set, a name will be generated from the cluster name.
WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"`
}

type SpecKubernetesAPIServer struct {
- // This value defines if the API server will be accessible only from the private
- // subnets
+ // This value defines if the Kubernetes API server will be accessible from the
+ // private subnets. Default is `true`.
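+ //
+ // A hypothetical setup for a cluster reachable only from the private network
+ // (the CIDR is a placeholder):
+ //
+ //   apiServer:
+ //     privateAccess: true
+ //     privateAccessCidrs:
+ //       - 10.0.0.0/16
+ //     publicAccess: false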
PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`

- // This value defines the CIDRs that will be allowed to access the API server from
- // the private subnets
+ // The network CIDRs from the private subnets that will be allowed to access the
+ // Kubernetes API server.
PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`

- // This value defines if the API server will be accessible from the public subnets
+ // This value defines if the Kubernetes API server will be accessible from the
+ // public subnets. Default is `false`.
PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`

- // This value defines the CIDRs that will be allowed to access the API server from
- // the public subnets
+ // The network CIDRs from the public subnets that will be allowed to access the
+ // Kubernetes API server.
PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
}

+// Optional additional security configuration for EKS IAM via the `aws-auth`
+// configmap.
+//
+// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
type SpecKubernetesAwsAuth struct {
// This optional array defines additional AWS accounts that will be added to the
- // aws-auth configmap
+ // `aws-auth` configmap.
AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"`

// This optional array defines additional IAM roles that will be added to the
- // aws-auth configmap
+ // `aws-auth` configmap.
Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"`

// This optional array defines additional IAM users that will be added to the
- // aws-auth configmap
+ // `aws-auth` configmap.
Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
}

@@ -1445,6 +1664,8 @@ type SpecKubernetesAwsAuthUser struct {
Username string `json:"username" yaml:"username" mapstructure:"username"`
}

+type SpecKubernetesLogRetentionDays int
+
type SpecKubernetesLogsTypesElem string

const (
@@ -1455,6 +1676,8 @@ const (
SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler"
)

+// Array with all the node pool definitions that will join the cluster. Each item
+// is an object.
type SpecKubernetesNodePool struct {
// AdditionalFirewallRules corresponds to the JSON schema field
// "additionalFirewallRules".
@@ -1464,35 +1687,38 @@ type SpecKubernetesNodePool struct {
Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"`

// This optional array defines additional target groups to attach to the instances
- // in the node pool
+ // in the node pool.
AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"`

- // The container runtime to use for the nodes
+ // The container runtime to use in the nodes of the node pool. Default is
+ // `containerd`.
ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"`

// Instance corresponds to the JSON schema field "instance".
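+ //
+ // A sketch of an instance configuration for a node pool (values are examples
+ // only):
+ //
+ //   instance:
+ //     type: t3.xlarge
+ //     spot: false
+ //     volumeSize: 50
+ //     volumeType: gp3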
Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"`

- // Kubernetes labels that will be added to the nodes
+ // Kubernetes labels that will be added to the nodes.
Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`

- // The name of the node pool
+ // The name of the node pool.
Name string `json:"name" yaml:"name" mapstructure:"name"`

// Size corresponds to the JSON schema field "size".
Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"`

- // This value defines the subnet IDs where the nodes will be created
+ // Optional list of subnet IDs where the nodes will be created.
SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`

- // AWS tags that will be added to the ASG and EC2 instances
+ // AWS tags that will be added to the ASG and EC2 instances.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`

- // Kubernetes taints that will be added to the nodes
+ // Kubernetes taints that will be added to the nodes.
Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"`

- // Type corresponds to the JSON schema field "type".
- Type *SpecKubernetesNodePoolType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
+ // The type of Node Pool, can be `self-managed` for using customizations like a
+ // custom AMI or setting the max pods per node, or `eks-managed` for using
+ // prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use
+ // `self-managed`.
+ Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"`
}

type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct {
@@ -1508,10 +1734,11 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct {
// Protocol corresponds to the JSON schema field "protocol".
Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`

- // Tags corresponds to the JSON schema field "tags".
+ // Additional AWS tags for the Firewall rule.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`

- // Type corresponds to the JSON schema field "type".
+ // The type of the Firewall rule, can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"`
}

@@ -1522,6 +1749,7 @@ const (
SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress"
)

+// Port range for the Firewall Rule.
type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct {
// From corresponds to the JSON schema field "from".
From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"`
@@ -1531,22 +1759,23 @@ type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct {
}

type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct {
- // The name of the FW rule
+ // The name of the Firewall rule.
Name string `json:"name" yaml:"name" mapstructure:"name"`

// Ports corresponds to the JSON schema field "ports".
Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`

- // The protocol of the FW rule
+ // The protocol of the Firewall rule.
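+ //
+ // A hypothetical `self` rule allowing HTTPS traffic between the nodes of the
+ // pool (names and values are examples only):
+ //
+ //   self:
+ //     - name: allow-https-from-node-pool
+ //       type: ingress
+ //       self: true
+ //       protocol: tcp
+ //       ports:
+ //         from: 443
+ //         to: 443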
Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`

- // If true, the source will be the security group itself
+ // If `true`, the source will be the security group itself.
Self bool `json:"self" yaml:"self" mapstructure:"self"`

- // The tags of the FW rule
+ // Additional AWS tags for the Firewall rule.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`

- // The type of the FW rule can be ingress or egress
+ // The type of the Firewall rule, can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"`
}

@@ -1558,22 +1787,23 @@ const (
)

type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct {
- // The name of the FW rule
+ // The name for the additional Firewall rule Security Group.
Name string `json:"name" yaml:"name" mapstructure:"name"`

// Ports corresponds to the JSON schema field "ports".
Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`

- // The protocol of the FW rule
+ // The protocol of the Firewall rule.
Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`

- // The source security group ID
+ // The source security group ID.
SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"`

- // The tags of the FW rule
+ // Additional AWS tags for the Firewall rule.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`

- // The type of the FW rule can be ingress or egress
+ // The type of the Firewall rule, can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"`
}

@@ -1584,9 +1814,11 @@ const (
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress"
)

+// Optional additional firewall rules that will be attached to the nodes.
type SpecKubernetesNodePoolAdditionalFirewallRules struct {
- // The CIDR blocks for the FW rule. At the moment the first item of the list will
- // be used, others will be ignored.
+ // The CIDR block objects definition for the Firewall rule. Even though it is a
+ // list, only one item is currently supported. See
+ // https://github.com/sighupio/fury-eks-installer/issues/46 for more details.
CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"`

// Self corresponds to the JSON schema field "self".
@@ -1597,14 +1829,36 @@ type SpecKubernetesNodePoolAdditionalFirewallRules struct {
SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"`
}

+// Configuration for customizing the Amazon Machine Image (AMI) for the machines
+// of the Node Pool.
+//
+// The AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields
+// for using a custom AMI (only with the `self-managed` node pool type) or by
+// setting the `ami.type` field to one of the official AMIs based on Amazon Linux.
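+//
+// Two hypothetical examples, one selecting an official AMI type and one using
+// a custom AMI on a `self-managed` node pool (the IDs are placeholders):
+//
+//   ami:
+//     type: alinux2023
+//
+//   ami:
+//     id: ami-0123456789abcdef0
+//     owner: "123456789012"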
type SpecKubernetesNodePoolAmi struct {
- // The AMI ID to use for the nodes
- Id string `json:"id" yaml:"id" mapstructure:"id"`
+ // The ID of the AMI to use for the nodes, must be set together with the `owner`
+ // field. `ami.id` and `ami.owner` can only be set when the Node Pool type is
+ // `self-managed` and they can't be set at the same time as `ami.type`.
+ Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"`
+
+ // The owner of the AMI to use for the nodes, must be set together with the `id`
+ // field. `ami.id` and `ami.owner` can only be set when the Node Pool type is
+ // `self-managed` and they can't be set at the same time as `ami.type`.
+ Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"`

- // The owner of the AMI
- Owner string `json:"owner" yaml:"owner" mapstructure:"owner"`
+ // The AMI type defines the AMI to use for `eks-managed` and `self-managed` types
+ // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at
+ // the same time as `ami.id` and `ami.owner`.
+ Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
}

+type SpecKubernetesNodePoolAmiType string
+
+const (
+ SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2"
+ SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023"
+)
+
type SpecKubernetesNodePoolContainerRuntime string

const (
@@ -1612,20 +1866,32 @@ const (
SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker"
)

+type SpecKubernetesNodePoolGlobalAmiType string
+
+const (
+ SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2"
+ SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023"
+)
+
+// Configuration for the instances that will be used in the node pool.
type SpecKubernetesNodePoolInstance struct {
- // MaxPods corresponds to the JSON schema field "maxPods".
+ // Set the maximum pods per node to a custom value. If not set, the default EKS
+ // value, which depends on the instance type, will be used.
+ //
+ // Ref:
+ // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt
MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"`

- // If true, the nodes will be created as spot instances
+ // If `true`, the nodes will be created as spot instances. Default is `false`.
Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"`

- // The instance type to use for the nodes
+ // The instance type to use for the nodes.
Type string `json:"type" yaml:"type" mapstructure:"type"`

- // The size of the disk in GB
+ // The size of the disk in GB.
VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"`

- // VolumeType corresponds to the JSON schema field "volumeType".
+ // Volume type for the instance disk. Default is `gp2`.
VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"`
}

@@ -1639,10 +1905,10 @@ const (
)

type SpecKubernetesNodePoolSize struct {
- // The maximum number of nodes in the node pool
+ // The maximum number of nodes in the node pool.
Max int `json:"max" yaml:"max" mapstructure:"max"`

- // The minimum number of nodes in the node pool
+ // The minimum number of nodes in the node pool.
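+ //
+ // For example, a node pool that can scale between one and three nodes:
+ //
+ //   size:
+ //     min: 1
+ //     max: 3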
Min int `json:"min" yaml:"min" mapstructure:"min"`
}

@@ -1681,6 +1947,10 @@ type SpecPluginsHelmReleases []struct {
// The chart of the release
Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`

+ // Disable running `helm diff` validation when installing the plugin; it will
+ // still be run when upgrading.
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"`
+
// The name of the release
Name string `json:"name" yaml:"name" mapstructure:"name"`

@@ -1731,24 +2001,26 @@ type SpecToolsConfigurationTerraform struct {
State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"`
}

+// Configuration for storing the Terraform state of the cluster.
type SpecToolsConfigurationTerraformState struct {
// S3 corresponds to the JSON schema field "s3".
S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"`
}

+// Configuration for the S3 bucket used to store the Terraform state.
type SpecToolsConfigurationTerraformStateS3 struct {
- // This value defines which bucket will be used to store all the states
+ // This value defines which bucket will be used to store all the states.
BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"`

// This value defines which folder will be used to store all the states inside the
- // bucket
+ // bucket.
KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"`

- // This value defines in which region the bucket is located
+ // This value defines in which region the bucket is located.
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`

// This value defines if the region of the bucket should be validated or not by
- // Terraform, useful when using a bucket in a recently added region
+ // Terraform, useful when using a bucket in a recently added region.
SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"`
}

@@ -1815,10 +2087,10 @@ type TypesEnvRef string
type TypesFileRef string

type TypesFuryModuleComponentOverrides struct {
- // The node selector to use to place the pods for the minio module
+ // Set to override the node selector used to place the pods of the package.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

- // The tolerations that will be added to the pods for the cert-manager module
+ // Set to override the tolerations that will be added to the pods of the package.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}

@@ -1827,11 +2099,11 @@ type TypesFuryModuleComponentOverridesWithIAMRoleName struct {
IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"`

// The node selector to use to place the pods for the load balancer controller
- // module
+ // module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

// The tolerations that will be added to the pods for the cluster autoscaler
- // module
+ // module.
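+ //
+ // A hypothetical override that schedules the pods on infra nodes (the label
+ // and taint values are examples only):
+ //
+ //   overrides:
+ //     nodeSelector:
+ //       node.kubernetes.io/role: infra
+ //     tolerations:
+ //       - key: node.kubernetes.io/role
+ //         operator: Equal
+ //         value: infra
+ //         effect: NoSchedule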
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -1843,25 +2115,28 @@ type TypesFuryModuleComponentOverrides_1 struct { Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +// Override the common configuration with a particular configuration for the +// module. type TypesFuryModuleOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the dr module + // Set to override the node selector used to place the pods of the module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the monitoring module + // Set to override the tolerations that will be added to the pods of the module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - // The host of the ingress + // Use this host for the ingress instead of the default one. Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } @@ -1886,18 +2161,18 @@ type TypesKubeResources struct { } type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods + // The CPU limit for the Pod. Example: `1000m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory limit for the opensearch pods + // The memory limit for the Pod. Example: `1G`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods + // The CPU request for the Pod, in cores. Example: `500m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory request for the opensearch pods + // The memory request for the Pod. Example: `500M`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } @@ -2042,54 +2317,70 @@ var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ "mimir", } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) - return nil +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) - return nil +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", } var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ @@ -2102,132 +2393,120 @@ var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId "egress", } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) - return nil +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", +} + +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + *j = SpecDistributionModulesDr(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } @@ -2245,6 +2524,9 @@ func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) + } if plain.Self != nil && len(plain.Self) < 1 { return fmt.Errorf("field %s length: must be >= %d", "self", 1) } @@ -2256,155 +2538,114 @@ func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) - return nil -} - -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) - } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecKubernetesAwsAuthUser + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") - } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesAPIServer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecKubernetesAPIServer(plain) + *j = SpecDistributionModulesTracingType(v) return nil } -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string @@ -2426,235 +2667,268 @@ func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecInfrastructureVpn + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpn(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecInfrastructureVpnSsh + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpc - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = SpecInfrastructureVpc(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") - } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpcNetwork - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) } - *j = SpecInfrastructureVpcNetwork(plain) + *j = TypesAwsRegion(v) return nil } +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecKubernetesNodePoolInstance + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistribution(plain) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") - } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionModules + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecKubernetesNodePoolSize + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesTracing + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesTracingType(v) - return nil -} - -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", + *j = SpecDistributionModulesDrType(v) + return nil } var enumValues_SpecKubernetesNodePoolType = []interface{}{ @@ -2683,45 +2957,75 @@ func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) return nil } @@ -2740,6 +3044,9 @@ func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { if v, ok := raw["size"]; !ok || v == nil { return fmt.Errorf("field size in SpecKubernetesNodePool: required") } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePool: required") + } type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { @@ -2750,22 +3057,20 @@ func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecDistributionModulesIngressNginx(plain) return nil } @@ -2795,58 +3100,83 @@ func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain SpecDistributionModulesPolicyKyverno + *j = SpecDistributionModulesAuth(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyKyverno(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "audit", - "enforce", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2859,6 +3189,9 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") } + if v, ok := raw["nodePoolGlobalAmiType"]; !ok || v == nil { + return fmt.Errorf("field nodePoolGlobalAmiType in SpecKubernetes: required") + } if v, ok := raw["nodePools"]; !ok || v == nil { return fmt.Errorf("field nodePools in SpecKubernetes: required") } @@ -2875,23 +3208,22 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesPolicyGatekeeper - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } @@ -2917,124 +3249,160 @@ func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionModulesLogging + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } @@ -3063,20 +3431,22 @@ func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } @@ -3099,22 +3469,22 @@ func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecKubernetesNodePoolAmiType(v) return nil } @@ -3137,22 +3507,22 @@ func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } @@ -3175,41 +3545,22 @@ func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesLoggingCustomOutputs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } @@ -3247,248 +3598,222 @@ func (j *Spec) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["dns"]; !ok || v == nil { - return fmt.Errorf("field dns in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecDistributionModulesIngress + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - type Plain SpecDistributionModulesIngressNginx + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = TypesKubeToleration(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecDistributionModulesIngressNginxTLS + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressNginxTLSSecret - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecKubernetesLogsTypesElem(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = TypesKubeTolerationOperator(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecDistributionModulesIngressDNS: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecDistributionModulesIngressDNS: required") - } - type Plain SpecDistributionModulesIngressDNS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressDNS(plain) - return nil +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressDNSPublic - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModulesLoggingType(v) return nil } - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressDNSPrivate - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = SpecKubernetesLogRetentionDays(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_TypesKubeTolerationEffect { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = TypesKubeTolerationEffect(v) return nil } @@ -3520,38 +3845,50 @@ func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) err } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - type Plain SpecDistributionModulesDrVelero + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } @@ -3582,95 +3919,81 @@ func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesAwsRegion { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") } - *j = TypesAwsRegion(v) + type Plain SpecKubernetesAPIServer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAPIServer(plain) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } @@ -3695,351 +4018,381 @@ func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { if !ok { return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = TypesKubeTolerationOperator_1(v) + *j = TypesKubeTolerationOperator_1(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + } + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + } + type Plain SpecInfrastructureVpnSsh + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - type Plain SpecDistributionModulesAuth + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = TypesKubeToleration_1(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecInfrastructureVpc(plain) return nil } +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain TypesKubeToleration_1 + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecDistributionModulesPolicyKyverno var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") - } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthDex(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistribution(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionModulesPolicyType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") + } + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModules(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecDistributionModulesPolicy(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") } - type Plain TypesKubeToleration + type Plain SpecDistributionCommonProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecDistributionCommonProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesKubeTolerationOperator(v) + *j = SpecDistributionModulesTracing(plain) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { +func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { + for _, expected := range enumValues_EksclusterKfdV1Alpha2Kind { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_EksclusterKfdV1Alpha2Kind, v) } - *j = TypesKubeTolerationEffect(v) + *j = EksclusterKfdV1Alpha2Kind(v) return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_EksclusterKfdV1Alpha2Kind { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_EksclusterKfdV1Alpha2Kind, v) - } - *j = EksclusterKfdV1Alpha2Kind(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") - } - type Plain SpecDistributionCommonProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = SpecDistributionCommonProvider(plain) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index d6f1b3eb9..e8f0ddf11 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -6,8 +6,11 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" ) +// KFD modules deployed on top of an existing Kubernetes cluster. type KfddistributionKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -27,7 +30,8 @@ type KfddistributionKfdV1Alpha2Kind string const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -35,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". 
+ // Defines which KFD version will be installed and, as a consequence, the
+ // Kubernetes version used to create the cluster. It supports git tags and
+ // branches. Example: `v1.30.1`.
 DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"`

 // Plugins corresponds to the JSON schema field "plugins".
@@ -49,36 +55,45 @@ type SpecDistribution struct {
 // CustomPatches corresponds to the JSON schema field "customPatches".
 CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"`

- // The kubeconfig file path
+ // The path to the kubeconfig file.
 Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"`

 // Modules corresponds to the JSON schema field "modules".
 Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"`
 }

+// Common configuration for all the distribution modules.
 type SpecDistributionCommon struct {
- // The node selector to use to place the pods for all the KFD modules
+ // The node selector to use to place the pods for all the KFD modules. Follows
+ // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
 NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

 // Provider corresponds to the JSON schema field "provider".
 Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"`

 // URL of the registry where to pull images from for the Distribution phase.
- // (Default is registry.sighup.io/fury).
+ // (Default is `registry.sighup.io/fury`).
 //
 // NOTE: If plugins are pulling from the default registry, the registry will be
 // replaced for the plugin too.
 Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`

- // The relative path to the vendor directory, does not need to be changed
+ // The relative path to the vendor directory; it does not need to be changed.
 RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"`

- // The tolerations that will be added to the pods for all the KFD modules
+ // An array with the tolerations that will be added to the pods for all the KFD
+ // modules. Follows Kubernetes tolerations format. Example:
+ //
+ // ```yaml
+ // - effect: NoSchedule
+ //   key: node.kubernetes.io/role
+ //   value: infra
+ // ```
 Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }

 type SpecDistributionCommonProvider struct {
- // The type of the provider
+ // The provider type. Don't set. FOR INTERNAL USE ONLY.
 Type string `json:"type" yaml:"type" mapstructure:"type"`
 }

@@ -277,8 +292,11 @@ type SpecDistributionModules struct {
 Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
 }

+// Configuration for the Auth module.
 type SpecDistributionModulesAuth struct {
- // The base domain for the auth module
+ // The base domain for the ingresses created by the Auth module (Gangplank,
+ // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will
+ // use the `external` ingress class.
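+ //
+ // An illustrative sketch of where this field sits in the configuration (a real
+ // `sso` setup also requires the `dex` and `pomerium` fields documented below;
+ // the domain is an example, not a default):
+ //
+ // ```yaml
+ // auth:
+ //   baseDomain: auth.example.dev
+ //   provider:
+ //     type: sso
+ // ```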
BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`

 // Dex corresponds to the JSON schema field "dex".
@@ -294,11 +312,25 @@ type SpecDistributionModulesAuth struct {
 Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 }

+// Configuration for the Dex package.
 type SpecDistributionModulesAuthDex struct {
- // The additional static clients for dex
+ // Additional static client definitions that will be added to the default clients
+ // included with the distribution in Dex's configuration. Example:
+ //
+ // ```yaml
+ // additionalStaticClients:
+ //   - id: my-custom-client
+ //     name: "A custom additional static client"
+ //     redirectURIs:
+ //       - "https://myapp.tld/redirect"
+ //       - "https://alias.tld/oidc-callback"
+ //     secret: supersecretpassword
+ // ```
+ // Reference: https://dexidp.io/docs/connectors/local/
 AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`

- // The connectors for dex
+ // A list with each item defining a Dex connector. Follows Dex connectors
+ // configuration format: https://dexidp.io/docs/connectors/
 Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`

 // Expiry corresponds to the JSON schema field "expiry".
@@ -316,25 +348,29 @@ type SpecDistributionModulesAuthDexExpiry struct {
 SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
 }

+// Override the common configuration with a particular configuration for the Auth
+// module.
 type SpecDistributionModulesAuthOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
+ // Override the definition of the Auth module ingresses.
 Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`

- // The node selector to use to place the pods for the auth module
+ // Set to override the node selector used to place the pods of the Auth module.
 NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

- // The tolerations that will be added to the pods for the auth module
+ // Set to override the tolerations that will be added to the pods of the Auth
+ // module.
 Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }

 type SpecDistributionModulesAuthOverridesIngress struct {
- // The host of the ingress
+ // Use this host for the ingress instead of the default one.
 Host string `json:"host" yaml:"host" mapstructure:"host"`

- // The ingress class of the ingress
+ // Use this ingress class for the ingress instead of the default one.
 IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"`
 }

+// Override the definition of the Auth module ingresses.
 type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress

 type SpecDistributionModulesAuthPomerium interface{}

@@ -459,15 +495,23 @@ type SpecDistributionModulesAuthProvider struct {
 // BasicAuth corresponds to the JSON schema field "basicAuth".
BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -479,11 +523,16 @@ const ( SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***on-premises*** + // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` + // disables the module and `on-premises` will install Velero and an optional MinIO + // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -497,18 +546,24 @@ const ( SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" ) +// Configuration for the Velero package. type SpecDistributionModulesDrVelero struct { - // The backend for velero + // The storage backend type for Velero. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesDrVeleroBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Velero's external storage backend. ExternalEndpoint *SpecDistributionModulesDrVeleroExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for velero - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + // Configuration for Velero's backup schedules. 
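+ //
+ // An illustrative sketch of the schedules configuration (the schedule and TTL
+ // values match the defaults documented on the fields below; `snapshotMoveData`
+ // is shown disabled):
+ //
+ // ```yaml
+ // velero:
+ //   schedules:
+ //     install: true
+ //     definitions:
+ //       manifests:
+ //         schedule: "*/15 * * * *"
+ //         ttl: 720h0m0s
+ //       full:
+ //         schedule: "0 1 * * *"
+ //         snapshotMoveData: false
+ // ```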
+ Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"`
+
+ // Configuration for the additional snapshotController component installation.
+ SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"`
 }

 type SpecDistributionModulesDrVeleroBackend string

@@ -518,42 +573,108 @@ const (
 SpecDistributionModulesDrVeleroBackendMinio SpecDistributionModulesDrVeleroBackend = "minio"
 )

+// Configuration for Velero's external storage backend.
 type SpecDistributionModulesDrVeleroExternalEndpoint struct {
- // The access key id for velero backend
+ // The access key ID (username) for the external S3-compatible bucket.
 AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`

- // The bucket name for velero backend
+ // The bucket name of the external S3-compatible object storage.
 BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`

- // The endpoint for velero
+ // External S3-compatible endpoint for Velero's storage.
 Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`

- // If true, the endpoint will be insecure
+ // If true, will use HTTP as protocol instead of HTTPS.
 Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`

- // The secret access key for velero backend
+ // The secret access key (password) for the external S3-compatible bucket.
 SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
 }

+// Configuration for Velero's backup schedules.
+type SpecDistributionModulesDrVeleroSchedules struct {
+ // Configuration for Velero schedules.
+ Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"`
+
+ // Whether or not to install the default `manifests` and `full` backup schedules.
+ // Default is `true`.
+ Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
+// Configuration for Velero schedules.
+type SpecDistributionModulesDrVeleroSchedulesDefinitions struct {
+ // Configuration for Velero's full backup schedule.
+ Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
+
+ // Configuration for Velero's manifests backup schedule.
+ Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+}
+
+// Configuration for Velero's full backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct {
+ // The cron expression for the `full` backup schedule (default `0 1 * * *`).
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // EXPERIMENTAL (if you take more than one backup, the backups after the first
+ // one are not automatically restorable, see
+ // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for
+ // the manual restore solution): SnapshotMoveData specifies whether snapshot data
+ // should be moved. Velero will create a new volume from the snapshot and upload
+ // the content to the storageLocation.
+ SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"`
+
+ // The Time To Live (TTL) of the backups created by the backup schedules (default
+ // `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+ // created backups; prior backups will keep the old TTL.
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
+
+// Configuration for Velero's manifests backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct {
+ // The cron expression for the `manifests` backup schedule (default
+ // `*/15 * * * *`).
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // The Time To Live (TTL) of the backups created by the backup schedules (default
+ // `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+ // created backups; prior backups will keep the old TTL.
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
+
+// Configuration for the additional snapshotController component installation.
+type SpecDistributionModulesDrVeleroSnapshotController struct {
+ // Whether or not to install the snapshotController component in the cluster.
+ // Before enabling this field, check that your CSI driver does not already ship
+ // with a built-in snapshotController.
+ Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
 type SpecDistributionModulesIngress struct {
- // the base domain used for all the KFD ingresses, if in the nginx dual
- // configuration, it should be the same as the
- // .spec.distribution.modules.ingress.dns.private.name zone
+ // The base domain used for all the KFD infrastructural ingresses. If using the
+ // nginx `dual` type, this value should be the same as the domain associated with
+ // the `internal` ingress class.
 BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`

- // CertManager corresponds to the JSON schema field "certManager".
+ // Configuration for the cert-manager package. Required even if
+ // `ingress.nginx.type` is `none`, since cert-manager is used for managing other
+ // certificates in the cluster besides the TLS termination certificates for the
+ // ingresses.
 CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"`

 // Forecastle corresponds to the JSON schema field "forecastle".
 Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`

- // Configurations for the nginx ingress controller module
+ // Configurations for the Ingress nginx controller package.
 Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`

 // Overrides corresponds to the JSON schema field "overrides".
 Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }

+// Configuration for the cert-manager package. Required even if
+// `ingress.nginx.type` is `none`, since cert-manager is used for managing other
+// certificates in the cluster besides the TLS termination certificates for the
+// ingresses.
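+//
+// An illustrative sketch of the corresponding configuration (the name and
+// email are examples, not defaults):
+//
+// ```yaml
+// certManager:
+//   clusterIssuer:
+//     name: letsencrypt-fury
+//     email: admin@example.dev
+//     type: http01
+// ```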
 type SpecDistributionModulesIngressCertManager struct {
 // ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
 ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
@@ -562,17 +683,23 @@ type SpecDistributionModulesIngressCertManager struct {
 Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }

+// Configuration for the cert-manager's ACME clusterIssuer used to request
+// certificates from Let's Encrypt.
 type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
- // The email of the cluster issuer
+ // The email address to use during the certificate issuing process.
 Email string `json:"email" yaml:"email" mapstructure:"email"`

- // The name of the cluster issuer
+ // The name of the clusterIssuer.
 Name string `json:"name" yaml:"name" mapstructure:"name"`

- // The custom solvers configurations
+ // The list of challenge solvers to use instead of the default one for the
+ // `http01` challenge. Check [cert-manager's
+ // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types)
+ // for examples of how to configure this field.
 Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`

- // The type of the cluster issuer, must be ***http01***
+ // The type of the clusterIssuer. Only the `http01` challenge is supported for
+ // the KFDDistribution kind. See the solvers field for arbitrary configurations.
 Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
 }

@@ -592,14 +719,24 @@ type SpecDistributionModulesIngressNginx struct {
 // Tls corresponds to the JSON schema field "tls".
 Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`

- // The type of the nginx ingress controller, must be ***none***, ***single*** or
- // ***dual***
+ // The type of the Ingress nginx controller, options are:
+ // - `none`: no ingress controller will be installed and no infrastructural
+ // ingresses will be created.
+ // - `single`: a single ingress controller with ingress class `nginx` will be
+ // installed to manage all the ingress resources, infrastructural ingresses will
+ // be created.
+ // - `dual`: two independent ingress controllers will be installed, one for the
+ // `internal` ingress class intended for private ingresses and one for the
+ // `external` ingress class intended for public ingresses. KFD infrastructural
+ // ingresses will use the `internal` ingress class when using the dual type.
+ //
+ // Default is `single`.
 Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
 }

 type SpecDistributionModulesIngressNginxTLS struct {
- // The provider of the TLS certificate, must be ***none***, ***certManager*** or
- // ***secret***
+ // The provider of the TLS certificates for the ingresses, one of: `none`,
+ // `certManager`, or `secret`.
 Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`

 // Secret corresponds to the JSON schema field "secret".
@@ -614,15 +751,18 @@ const (
 SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
 )

+// Kubernetes TLS secret for the ingresses TLS certificate.
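+//
+// An illustrative sketch using the `"{file://<path>}"` notation described on
+// the fields below (the paths are examples):
+//
+// ```yaml
+// tls:
+//   provider: secret
+//   secret:
+//     ca: "{file://certs/ca.crt}"
+//     cert: "{file://certs/tls.crt}"
+//     key: "{file://certs/tls.key}"
+// ```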
 type SpecDistributionModulesIngressNginxTLSSecret struct {
- // Ca corresponds to the JSON schema field "ca".
+ // The Certificate Authority certificate file's content. You can use the
+ // `"{file://<path>}"` notation to get the content from a file.
 Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`

- // The certificate file content or you can use the file notation to get the
- // content from a file
+ // The certificate file's content. You can use the `"{file://<path>}"` notation
+ // to get the content from a file.
 Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`

- // Key corresponds to the JSON schema field "key".
+ // The signing key file's content. You can use the `"{file://<path>}"` notation
+ // to get the content from a file.
 Key string `json:"key" yaml:"key" mapstructure:"key"`
 }

@@ -634,14 +774,17 @@ const (
 SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
 )

+// Override the common configuration with a particular configuration for the
+// Ingress module.
 type SpecDistributionModulesIngressOverrides struct {
 // Ingresses corresponds to the JSON schema field "ingresses".
 Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`

- // The node selector to use to place the pods for the ingress module
+ // Set to override the node selector used to place the pods of the Ingress module.
 NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

- // The tolerations that will be added to the pods for the ingress module
+ // Set to override the tolerations that will be added to the pods of the Ingress
+ // module.
 Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }

@@ -650,6 +793,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
 Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
 }

+// Configuration for the Logging module.
 type SpecDistributionModulesLogging struct {
 // Cerebro corresponds to the JSON schema field "cerebro".
 Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
@@ -672,83 +816,104 @@ type SpecDistributionModulesLogging struct {
 // Overrides corresponds to the JSON schema field "overrides".
 Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

- // selects the logging stack. Choosing none will disable the centralized logging.
- // Choosing opensearch will deploy and configure the Logging Operator and an
+ // Selects the logging stack. Options are:
+ // - `none`: will disable the centralized logging.
+ // - `opensearch`: will deploy and configure the Logging Operator and an
 // OpenSearch cluster (can be single or triple for HA) where the logs will be
- // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
- // for storage. Choosing customOuput the Logging Operator will be deployed and
- // installed but with no local storage, you will have to create the needed Outputs
- // and ClusterOutputs to ship the logs to your desired storage.
+ // stored.
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+ // storage.
+ // - `customOutputs`: the Logging Operator will be deployed and installed but
+ // without in-cluster storage; you will have to create the needed Outputs and
+ // ClusterOutputs to ship the logs to your desired storage.
+ //
+ // Default is `opensearch`.
 Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
 }

+// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
 type SpecDistributionModulesLoggingCerebro struct {
 // Overrides corresponds to the JSON schema field "overrides".
 Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }

-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
 type SpecDistributionModulesLoggingCustomOutputs struct {
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `audit` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
 Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`

- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `errors` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
 Errors string `json:"errors" yaml:"errors" mapstructure:"errors"`

- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `events` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
 Events string `json:"events" yaml:"events" mapstructure:"events"`

- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `infra` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition.
Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. 
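+ //
+ // An illustrative sketch pointing Loki to an external S3-compatible object
+ // storage (all values are examples):
+ //
+ // ```yaml
+ // loki:
+ //   backend: externalEndpoint
+ //   externalEndpoint:
+ //     endpoint: s3.example.dev
+ //     bucketName: loki-logs
+ //     accessKeyId: exampleAccessKey
+ //     secretAccessKey: exampleSecretKey
+ //     insecure: false
+ // ```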
ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`

 // Resources corresponds to the JSON schema field "resources".
 Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the
+ // time series database it uses to store the logs from BoltDB to TSDB, and the
+ // schema from v11 to v13.
+ //
+ // The value of this field will determine the date when Loki will start writing
+ // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB
+ // database and schema will be kept for reading purposes until they expire.
+ //
+ // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example:
+ // `2024-11-18`.
+ TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"`
 }

 type SpecDistributionModulesLoggingLokiBackend string

@@ -758,23 +923,25 @@ const (
 SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio"
 )

+// Configuration for Loki's external storage backend.
 type SpecDistributionModulesLoggingLokiExternalEndpoint struct {
- // The access key id of the loki external endpoint
+ // The access key ID (username) for the external S3-compatible bucket.
 AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`

- // The bucket name of the loki external endpoint
+ // The bucket name of the external S3-compatible object storage.
 BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`

- // The endpoint of the loki external endpoint
+ // External S3-compatible endpoint for Loki's storage.
 Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`

- // If true, the loki external endpoint will be insecure
+ // If true, will use HTTP as protocol instead of HTTPS.
 Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`

- // The secret access key of the loki external endpoint
+ // The secret access key (password) for the external S3-compatible bucket.
 SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
 }

+// Configuration for Logging's MinIO deployment.
 type SpecDistributionModulesLoggingMinio struct {
 // Overrides corresponds to the JSON schema field "overrides".
 Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -782,15 +949,15 @@ type SpecDistributionModulesLoggingMinio struct {
 // RootUser corresponds to the JSON schema field "rootUser".
 RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`

- // The PVC size for each minio disk, 6 disks total
+ // The PVC size for each MinIO disk, 6 disks total.
 StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
 }

 type SpecDistributionModulesLoggingMinioRootUser struct {
- // The password of the minio root user
+ // The password for the default MinIO root user.
Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -801,10 +968,12 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -815,6 +984,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -826,1524 +996,1612 @@ const ( SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". - BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` - - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` - - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` - - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
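A sketch of the OpenSearch variant described above, using the documented default storage size and the HA `triple` deployment; the YAML path is inferred from the type names and is illustrative, not taken from this patch:

```yaml
spec:
  distribution:
    modules:
      logging:
        type: opensearch
        opensearch:
          type: triple        # one of: single, triple
          storageSize: "150Gi" # the documented default
```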
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. - // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` - - // X509Exporter corresponds to the JSON schema field "x509Exporter". - X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil } -type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io - DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - - // If true, the default rules will be installed - InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - - // The slack webhook url to send alerts - SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -type SpecDistributionModulesMonitoringBlackboxExporter struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil } -type SpecDistributionModulesMonitoringGrafana struct { - // Setting this to true will deploy an additional `grafana-basic-auth` ingress - // protected with Grafana's basic auth instead of SSO. It's intended use is as a - // temporary ingress for when there are problems with the SSO login flow. - // - // Notice that by default anonymous access is enabled. - BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's - // role. Example: - // - // ```yaml - // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || - // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && - // 'Viewer' - // ``` - // - // More details in [Grafana's - // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). - UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil } -type SpecDistributionModulesMonitoringKubeStateMetrics struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil } -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } -type SpecDistributionModulesMonitoringMimirBackend string - -const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" - SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" -) - -type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external mimir backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external mimir backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external mimir backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external mimir backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
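The validators above encode the allowed `nginx.type` values (`none`, `single`, `dual`), the TLS providers (`certManager`, `secret`, `none`), and the three fields required when the TLS certificate comes from a secret (`ca`, `cert`, `key`). A hedged sketch of that combination, with placeholder PEM content and field paths inferred from the type names:

```yaml
spec:
  distribution:
    modules:
      ingress:
        baseDomain: example.dev
        nginx:
          type: single
          tls:
            provider: secret
            secret:
              ca: |
                -----BEGIN CERTIFICATE-----
                ...
              cert: |
                -----BEGIN CERTIFICATE-----
                ...
              key: |
                -----BEGIN PRIVATE KEY-----
                ...
```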
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil } -type SpecDistributionModulesMonitoringMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil } -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the k8s Prometheus instance. - RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the K8s Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the k8s Prometheus instance. 
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". - Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of networking to use, either ***none***, ***calico*** or ***cilium*** - Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesNetworkingCilium struct { - // MaskSize corresponds to the JSON schema field "maskSize". - MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // PodCidr corresponds to the JSON schema field "podCidr". - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` -} - -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" - SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" -) - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` -} - -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external tempo backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external tempo backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external tempo backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external tempo backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} - -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". 
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil } -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil } -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", } -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
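For the `certManager` provider, the validator above requires `name` and `email` on the cluster issuer, and `http01` is the only accepted issuer type. An illustrative configuration (the issuer name and email are placeholders):

```yaml
spec:
  distribution:
    modules:
      ingress:
        nginx:
          type: dual
          tls:
            provider: certManager
        certManager:
          clusterIssuer:
            name: letsencrypt-fury
            email: platform-team@example.dev
            type: http01
```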
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil } -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the security module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + } + *j = SpecDistributionModulesDrVeleroBackend(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
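The DR validators enforce only that `type` is set; combined with the Velero backend enum added in this hunk (`minio`, `externalEndpoint`), a minimal on-premises DR block might look like this. The `velero` key is inferred from the `SpecDistributionModulesDrVeleroBackend` type name, so treat the exact path as an assumption:

```yaml
spec:
  distribution:
    modules:
      dr:
        type: on-premises   # one of: none, on-premises
        velero:
          backend: minio
```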
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistributionModules + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", +} + +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +// UnmarshalJSON implements json.Unmarshaler. 
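The CPU and memory examples in the comments above (`500m`/`500M` requests, `1000m`/`1G` limits) translate to a `resources` block like the following, shown here on Loki, which exposes a `TypesKubeResources` field; the placement under `loki` is illustrative:

```yaml
spec:
  distribution:
    modules:
      logging:
        loki:
          resources:
            requests:
              cpu: "500m"
              memory: "500M"
            limits:
              cpu: "1000m"
              memory: "1G"
```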
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["maskSize"]; !ok || v == nil { - return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") - } - if v, ok := raw["podCidr"]; !ok || v == nil { - return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesNetworkingCilium + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworkingCilium(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // UnmarshalJSON implements json.Unmarshaler. 
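Putting `TypesFuryModuleOverridesIngress` together with the module-level overrides, here is a sketch of overriding a single ingress. Only the three per-ingress fields (`disableAuth`, `host`, `ingressClass`) come from the schema; the `grafana` key, the node selector label, and the host are hypothetical:

```yaml
spec:
  distribution:
    modules:
      monitoring:
        overrides:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
          ingresses:
            grafana:
              disableAuth: false
              host: grafana.internal.example.dev
              ingressClass: internal
```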
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", - "calico", - "cilium", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesLoggingOpensearch + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - type Plain SpecDistributionModulesNetworking + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworking(plain) + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - // UnmarshalJSON implements json.Unmarshaler. 
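The Basic Auth provider requires both `username` and `password` per the validator above, and the provider `type` must be one of `none`, `basicAuth`, or `sso`. An illustrative block; the `{env://...}` value is furyctl's reference syntax for reading values from the environment and is assumed here, not defined in this patch:

```yaml
spec:
  distribution:
    modules:
      auth:
        provider:
          type: basicAuth
          basicAuth:
            username: admin
            password: "{env://KFD_BASIC_AUTH_PASSWORD}"
```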
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecDistributionModulesAuthDex(plain) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecDistributionModulesLoggingType(v) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +const SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + // UnmarshalJSON implements json.Unmarshaler. 
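`connectors` is the only field the Dex validator above requires; its entries follow upstream Dex connector configuration, so the GitHub connector below is an assumption based on Dex's documentation rather than on this schema:

```yaml
spec:
  distribution:
    modules:
      auth:
        dex:
          connectors:
            - type: github
              id: github
              name: GitHub
              config:
                clientID: "{env://DEX_GITHUB_CLIENT_ID}"
                clientSecret: "{env://DEX_GITHUB_CLIENT_SECRET}"
                redirectURI: https://login.example.dev/callback
```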
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesPolicyGatekeeper - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. 
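The secret generator resource requires a `name` and accepts a `behavior` of `create`, `replace`, or `merge`. The sketch below assumes the remaining fields mirror Kustomize's `secretGenerator` (e.g. `literals`), which this hunk does not show:

```yaml
spec:
  distribution:
    customPatches:
      secretGenerator:
        - name: basic-auth
          behavior: replace
          literals:
            - username=admin
            - password=changeme
```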
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["baseDomain"]; !ok || v == nil {
-		return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
-	}
-	if v, ok := raw["nginx"]; !ok || v == nil {
-		return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
 	}
-	type Plain SpecDistributionModulesIngress
+	type Plain SpecDistributionModulesLogging
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesIngress(plain)
+	*j = SpecDistributionModulesLogging(plain)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
-	"audit",
-	"enforce",
+type SpecDistributionModulesMonitoringAlertManager struct {
+	// The webhook URL to send dead man's switch monitoring alerts to, for example
+	// to use with healthchecks.io.
+	DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
+
+	// Set to false to avoid installing the Prometheus rules (alerts) included with
+	// the distribution.
+	InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
+
+	// The Slack webhook URL to send the infrastructural and workload alerts to.
+	SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringBlackboxExporter struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+	// Setting this to true will deploy an additional `grafana-basic-auth` ingress
+	// protected with Grafana's basic auth instead of SSO. Its intended use is as a
+	// temporary ingress for when there are problems with the SSO login flow.
+	//
+	// Notice that by default anonymous access is enabled.
+	BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+	// role. Example:
+	//
+	// ```yaml
+	// usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+	// contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+	// 'Viewer'"
+	// ```
+	//
+	// More details in [Grafana's
+	// documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+	UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMimirBackend string
+
+var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
+	"minio",
+	"externalEndpoint",
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+	for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
 	}
-	*j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
+	*j = SpecDistributionModulesMonitoringMimirBackend(v)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
-	}
-	type Plain SpecDistributionModulesIngressNginx
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesIngressNginx(plain)
-	return nil
+const (
+	SpecDistributionModulesMonitoringMimirBackendMinio            SpecDistributionModulesMonitoringMimirBackend = "minio"
+	SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint"
+)
+
+// Configuration for Mimir's external storage backend.
+type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
+	// The access key ID (username) for the external S3-compatible bucket.
+	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+	// The bucket name of the external S3-compatible object storage.
+	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+	// The external S3-compatible endpoint for Mimir's storage.
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// If true, will use HTTP as the protocol instead of HTTPS.
+	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+	// The secret access key (password) for the external S3-compatible bucket.
+	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+// Configuration for the Mimir package.
+type SpecDistributionModulesMonitoringMimir struct {
+	// The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+	// deployment for object storage, `externalEndpoint` can be used to point to an
+	// external S3-compatible object storage instead of deploying an in-cluster MinIO.
+	Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+	// Configuration for Mimir's external storage backend.
+	ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The retention time for the metrics stored in Mimir. Default is `30d`. Value must
+	// match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365
+	// days.
+	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMinioRootUser struct {
+	// The password for the default MinIO root user.
+	Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+	// The username for the default MinIO root user.
+	Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Monitoring's MinIO deployment.
+type SpecDistributionModulesMonitoringMinio struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// RootUser corresponds to the JSON schema field "rootUser".
+	RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+	// The PVC size for each MinIO disk, 6 disks total.
+	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheus struct {
+	// Set this option to ship the collected metrics to a remote Prometheus receiver.
+	//
+	// `remoteWrite` is an array of objects that allows configuring the
+	// [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+	// Prometheus. The objects in the array follow [the same schema as in the
+	// prometheus
+	// operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+	RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+	// Resources corresponds to the JSON schema field "resources".
+	Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+	// The retention size for the `k8s` Prometheus instance.
+	RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
+
+	// The retention time for the `k8s` Prometheus instance.
+	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+
+	// The storage size for the `k8s` Prometheus instance.
+	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheusAgent struct {
+	// Set this option to ship the collected metrics to a remote Prometheus receiver.
+	//
+	// `remoteWrite` is an array of objects that allows configuring the
+	// [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+	// Prometheus. The objects in the array follow [the same schema as in the
+	// prometheus
+	// operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+	RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+	// Resources corresponds to the JSON schema field "resources".
+	Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringType string
+
+var enumValues_SpecDistributionModulesMonitoringType = []interface{}{
+	"none",
+	"prometheus",
+	"prometheusAgent",
+	"mimir",
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+	for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
 	}
-	*j = SpecDistributionModulesIngressNginxType(v)
+	*j = SpecDistributionModulesMonitoringType(v)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{
-	"none",
-	"single",
-	"dual",
+const (
+	SpecDistributionModulesMonitoringTypeNone            SpecDistributionModulesMonitoringType = "none"
+	SpecDistributionModulesMonitoringTypePrometheus      SpecDistributionModulesMonitoringType = "prometheus"
+	SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent"
+	SpecDistributionModulesMonitoringTypeMimir           SpecDistributionModulesMonitoringType = "mimir"
+)
+
+type SpecDistributionModulesMonitoringX509Exporter struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+// Configuration for the Monitoring module.
+type SpecDistributionModulesMonitoring struct {
+	// Alertmanager corresponds to the JSON schema field "alertmanager".
+	Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"`
+
+	// BlackboxExporter corresponds to the JSON schema field "blackboxExporter".
+	BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"`
+
+	// Grafana corresponds to the JSON schema field "grafana".
+	Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"`
+
+	// KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics".
+	KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"`
+
+	// Mimir corresponds to the JSON schema field "mimir".
+	Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"`
+
+	// Minio corresponds to the JSON schema field "minio".
+	Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Prometheus corresponds to the JSON schema field "prometheus".
+	Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
+
+	// PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
+	PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
+
+	// The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+	// `mimir`.
+	//
+	// - `none`: will disable the whole monitoring stack.
+	// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
+	// instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+	// the components of the cluster, Grafana and a series of dashboards to view the
+	// collected metrics, and more.
+	// - `prometheusAgent`: will install Prometheus Operator, an instance of
+	// Prometheus in Agent mode (no alerting, no queries, no storage), and all the
+	// exporters needed to get metrics for the status of the cluster and the
+	// workloads. Useful when there is a centralized (remote) Prometheus to ship
+	// the metrics to, instead of storing them locally in the cluster.
+	// - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+	// that allows for longer retention of metrics and the usage of Object Storage.
+	//
+	// Default is `prometheus`.
+	Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
+
+	// X509Exporter corresponds to the JSON schema field "x509Exporter".
+	X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
 }
 
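Editor's note: the Mimir `retentionTime` description above only gives the bare pattern `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)`. A quick way to sanity-check candidate values is to anchor that pattern and test it; the anchoring is an assumption added for this demo, not something the schema text states:

```go
package main

import (
	"fmt"
	"regexp"
)

// Pattern copied from the retentionTime description above; ^ and $ added here
// so that partial matches such as "30days" are rejected in this sketch.
var retentionRe = regexp.MustCompile(`^[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)$`)

func main() {
	for _, v := range []string{"30d", "1y", "12h", "30days", "d30"} {
		fmt.Printf("%-8q valid=%v\n", v, retentionRe.MatchString(v))
	}
}
```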
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
-		return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
-	}
-	if v, ok := raw["validationFailureAction"]; !ok || v == nil {
-		return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
 	}
-	type Plain SpecDistributionModulesPolicyKyverno
+	type Plain SpecDistributionModulesMonitoring
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesPolicyKyverno(plain)
+	*j = SpecDistributionModulesMonitoring(plain)
 	return nil
 }
 
+type TypesCidr string
+
+type SpecDistributionModulesNetworkingCilium struct {
+	// The mask size to use for the Pods network on each node.
+	MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Allows specifying a CIDR for the Pods network different from
+	// `.spec.kubernetes.podCidr`. If not set the default is to use
+	// `.spec.kubernetes.podCidr`.
+	PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["provider"]; !ok || v == nil {
-		return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
+	if v, ok := raw["maskSize"]; !ok || v == nil {
+		return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required")
 	}
-	type Plain SpecDistributionModulesIngressNginxTLS
+	if v, ok := raw["podCidr"]; !ok || v == nil {
+		return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required")
+	}
+	type Plain SpecDistributionModulesNetworkingCilium
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesIngressNginxTLS(plain)
+	*j = SpecDistributionModulesNetworkingCilium(plain)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesPolicyType = []interface{}{
+type SpecDistributionModulesNetworkingTigeraOperator struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesNetworkingType string
+
+var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
 	"none",
-	"gatekeeper",
-	"kyverno",
+	"calico",
+	"cilium",
 }
 
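Editor's note: for orientation, this is roughly what decoding a networking section that uses the Cilium types above looks like. The mirror structs and the mask/CIDR values are illustrative assumptions, not the generated code or real defaults:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down mirrors of the generated networking types (illustrative only).
type cilium struct {
	MaskSize string `json:"maskSize"`
	PodCidr  string `json:"podCidr"`
}

type networking struct {
	Type   string  `json:"type"`
	Cilium *cilium `json:"cilium,omitempty"`
}

func main() {
	// Hypothetical values; in the real schema both maskSize and podCidr are
	// required whenever the cilium block is present.
	doc := []byte(`{
	  "type": "cilium",
	  "cilium": {"maskSize": "24", "podCidr": "172.16.0.0/16"}
	}`)
	var n networking
	if err := json.Unmarshal(doc, &n); err != nil {
		panic(err)
	}
	fmt.Printf("type=%s cilium=%+v\n", n.Type, *n.Cilium)
}
```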
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesPolicyType {
+	for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
 	}
-	*j = SpecDistributionModulesPolicyType(v)
+	*j = SpecDistributionModulesNetworkingType(v)
 	return nil
 }
 
+const (
+	SpecDistributionModulesNetworkingTypeNone   SpecDistributionModulesNetworkingType = "none"
+	SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico"
+	SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium"
+)
+
+// Configuration for the Networking module.
+type SpecDistributionModulesNetworking struct {
+	// Cilium corresponds to the JSON schema field "cilium".
+	Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// TigeraOperator corresponds to the JSON schema field "tigeraOperator".
+	TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
+
+	// The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or
+	// `cilium`.
+	Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["ca"]; !ok || v == nil {
-		return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
-	}
-	if v, ok := raw["cert"]; !ok || v == nil {
-		return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
-	}
-	if v, ok := raw["key"]; !ok || v == nil {
-		return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesNetworking: required")
 	}
-	type Plain SpecDistributionModulesIngressNginxTLSSecret
+	type Plain SpecDistributionModulesNetworking
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesIngressNginxTLSSecret(plain)
+	*j = SpecDistributionModulesNetworking(plain)
 	return nil
 }
 
+type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
+
+var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{
+	"deny",
+	"dryrun",
+	"warn",
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
+	for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
 	}
-	*j = SpecDistributionModulesIngressNginxTLSProvider(v)
+	*j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{
-	"certManager",
-	"secret",
-	"none",
-}
+const (
+	SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny   SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
+	SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
+	SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn   SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
+)
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["clusterIssuer"]; !ok || v == nil {
-		return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
-	}
-	type Plain SpecDistributionModulesIngressCertManager
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesIngressCertManager(plain)
-	return nil
-}
+// Configuration for the Gatekeeper package.
+type SpecDistributionModulesPolicyGatekeeper struct {
+	// This parameter adds namespaces to Gatekeeper's exemption list, so it will not
+	// enforce the constraints on them.
+	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
-	}
-	type Plain SpecDistributionModulesPolicy
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesPolicy(plain)
-	return nil
+	// The default enforcement action to use for the included constraints. `deny` will
+	// block the admission when violations of the policies are found, `warn` will show
+	// a message to the user but will admit the violating requests, and `dryrun` won't
+	// give any feedback to the user but it will log the violations.
+	EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
+
+	// Set to `false` to avoid installing the default Gatekeeper policies (constraints
+	// templates and constraints) included with the distribution.
+	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["email"]; !ok || v == nil {
-		return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+	if v, ok := raw["enforcementAction"]; !ok || v == nil {
+		return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
 	}
-	if v, ok := raw["name"]; !ok || v == nil {
-		return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+	if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+		return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
 	}
-	type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
+	type Plain SpecDistributionModulesPolicyGatekeeper
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
-	}
-	*j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
+	*j = SpecDistributionModulesPolicyGatekeeper(plain)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{
-	"http01",
-}
-
-var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
-	"minio",
-	"externalEndpoint",
-}
+type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
-	}
-	*j = SpecDistributionModulesTracingTempoBackend(v)
-	return nil
+var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
+	"Audit",
+	"Enforce",
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesDr: required")
+	if v, ok := raw["provider"]; !ok || v == nil {
+		return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
 	}
-	type Plain SpecDistributionModulesDr
+	type Plain SpecDistributionModulesIngressNginxTLS
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesDr(plain)
+	*j = SpecDistributionModulesIngressNginxTLS(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v)
-	}
-	*j = SpecDistributionModulesDrVeleroBackend(v)
-	return nil
-}
+const (
+	SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit   SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
+	SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
+)
 
-var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{
-	"minio",
-	"externalEndpoint",
+// Configuration for the Kyverno package.
+type SpecDistributionModulesPolicyKyverno struct {
+	// This parameter adds namespaces to Kyverno's exemption list, so it will not
+	// enforce the policies on them.
+	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+	// Set to `false` to avoid installing the default Kyverno policies included with
+	// the distribution.
+	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The validation failure action to use for the policies: `Enforce` will block
+	// when a request does not comply with the policies and `Audit` will not block but
+	// log when a request does not comply with the policies.
+	ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesDrType {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
+	if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+		return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
 	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
+	if v, ok := raw["validationFailureAction"]; !ok || v == nil {
+		return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
 	}
-	*j = SpecDistributionModulesDrType(v)
+	type Plain SpecDistributionModulesPolicyKyverno
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SpecDistributionModulesPolicyKyverno(plain)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesDrType = []interface{}{
-	"none",
-	"on-premises",
-}
+type SpecDistributionModulesPolicyType string
 
-var enumValues_SpecDistributionModulesTracingType = []interface{}{
+var enumValues_SpecDistributionModulesPolicyType = []interface{}{
 	"none",
-	"tempo",
+	"gatekeeper",
+	"kyverno",
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesTracingType {
+	for _, expected := range enumValues_SpecDistributionModulesPolicyType {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
 	}
-	*j = SpecDistributionModulesTracingType(v)
+	*j = SpecDistributionModulesPolicyType(v)
 	return nil
 }
 
-type TypesFuryModuleOverridesIngress struct {
-	// If true, the ingress will not have authentication
-	DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
+const (
+	SpecDistributionModulesPolicyTypeNone       SpecDistributionModulesPolicyType = "none"
+	SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper"
+	SpecDistributionModulesPolicyTypeKyverno    SpecDistributionModulesPolicyType = "kyverno"
+)
 
-	// The host of the ingress
-	Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+// Configuration for the Policy module.
+type SpecDistributionModulesPolicy struct {
+	// Gatekeeper corresponds to the JSON schema field "gatekeeper".
+	Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
 
-	// The ingress class of the ingress
-	IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
-}
+	// Kyverno corresponds to the JSON schema field "kyverno".
+	Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"`
 
-type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["provider"]; !ok || v == nil {
-		return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
-	}
-	type Plain SpecDistributionModulesAuth
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesAuth(plain)
-	return nil
+	// The type of policy enforcement to use, either `none`, `gatekeeper` or
+	// `kyverno`.
+	//
+	// Default is `none`.
+	Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
 	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
+		return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
 	}
-	type Plain SpecDistributionModulesTracing
+	type Plain SpecDistributionModulesPolicy
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesTracing(plain)
+	*j = SpecDistributionModulesPolicy(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
-	}
-	type Plain SpecDistributionModulesAuthProvider
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesAuthProvider(plain)
-	return nil
+type SpecDistributionModulesTracingMinioRootUser struct {
+	// The password for the default MinIO root user.
+	Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+	// The username for the default MinIO root user.
+	Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Tracing's MinIO deployment.
+type SpecDistributionModulesTracingMinio struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// RootUser corresponds to the JSON schema field "rootUser".
+	RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+	// The PVC size for each MinIO disk, 6 disks total.
+	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesTracingTempoBackend string
+
+var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
+	"minio",
+	"externalEndpoint",
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
+	for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
 	}
-	*j = SpecDistributionModulesMonitoringType(v)
+	*j = SpecDistributionModulesTracingTempoBackend(v)
 	return nil
 }
 
+const (
+	SpecDistributionModulesTracingTempoBackendMinio            SpecDistributionModulesTracingTempoBackend = "minio"
+	SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint"
+)
+
+// Configuration for Tempo's external storage backend.
+type SpecDistributionModulesTracingTempoExternalEndpoint struct {
+	// The access key ID (username) for the external S3-compatible bucket.
+	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+	// The bucket name of the external S3-compatible object storage.
+	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+	// The external S3-compatible endpoint for Tempo's storage.
+	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+	// If true, will use HTTP as the protocol instead of HTTPS.
+	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+	// The secret access key (password) for the external S3-compatible bucket.
+	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+// Configuration for the Tempo package.
+type SpecDistributionModulesTracingTempo struct {
+	// The storage backend type for Tempo. `minio` will use an in-cluster MinIO
+	// deployment for object storage, `externalEndpoint` can be used to point to an
+	// external S3-compatible object storage instead of deploying an in-cluster MinIO.
+	Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+	// Configuration for Tempo's external storage backend.
+	ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The retention time for the traces stored in Tempo.
+	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesTracingType string
+
+var enumValues_SpecDistributionModulesTracingType = []interface{}{
+	"none",
+	"tempo",
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
+	for _, expected := range enumValues_SpecDistributionModulesTracingType {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
 	}
-	*j = SpecDistributionModulesAuthProviderType(v)
+	*j = SpecDistributionModulesTracingType(v)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["kubeconfig"]; !ok || v == nil {
-		return fmt.Errorf("field kubeconfig in SpecDistribution: required")
-	}
-	if v, ok := raw["modules"]; !ok || v == nil {
-		return fmt.Errorf("field modules in SpecDistribution: required")
-	}
-	type Plain SpecDistribution
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistribution(plain)
-	return nil
-}
+const (
+	SpecDistributionModulesTracingTypeNone  SpecDistributionModulesTracingType = "none"
+	SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo"
+)
 
-var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{
-	"none",
-	"basicAuth",
-	"sso",
+// Configuration for the Tracing module.
+type SpecDistributionModulesTracing struct {
+	// Minio corresponds to the JSON schema field "minio".
+	Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Tempo corresponds to the JSON schema field "tempo".
+	Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
+
+	// The type of tracing to use, either `none` or `tempo`. `none` will disable the
+	// Tracing module and `tempo` will install a Grafana Tempo deployment.
+	//
+	// Default is `tempo`.
+	Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
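Editor's note: Tempo's `externalEndpoint` block mirrors the Mimir one above: when `backend` is `externalEndpoint`, Tempo is pointed at an existing S3-compatible store instead of the in-cluster MinIO. A hedged decoding sketch, with a trimmed mirror type and entirely made-up endpoint and credential values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tempoExternalEndpoint loosely mirrors the generated
// SpecDistributionModulesTracingTempoExternalEndpoint (illustrative only).
type tempoExternalEndpoint struct {
	Endpoint        *string `json:"endpoint,omitempty"`
	BucketName      *string `json:"bucketName,omitempty"`
	AccessKeyId     *string `json:"accessKeyId,omitempty"`
	SecretAccessKey *string `json:"secretAccessKey,omitempty"`
	Insecure        *bool   `json:"insecure,omitempty"`
}

func main() {
	// Placeholder values, not real endpoints or credentials.
	doc := []byte(`{
	  "endpoint": "s3.example.com",
	  "bucketName": "tempo-traces",
	  "accessKeyId": "EXAMPLEKEY",
	  "secretAccessKey": "REDACTED",
	  "insecure": false
	}`)
	var e tempoExternalEndpoint
	if err := json.Unmarshal(doc, &e); err != nil {
		panic(err)
	}
	fmt.Println(*e.Endpoint, *e.BucketName)
}
```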
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["name"]; !ok || v == nil {
-		return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required")
-	}
-	if v, ok := raw["value"]; !ok || v == nil {
-		return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
 	}
-	type Plain SpecPluginsHelmReleasesElemSetElem
+	type Plain SpecDistributionModulesTracing
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecPluginsHelmReleasesElemSetElem(plain)
+	*j = SpecDistributionModulesTracing(plain)
 	return nil
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["password"]; !ok || v == nil {
-		return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
-	}
-	if v, ok := raw["username"]; !ok || v == nil {
-		return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+	if v, ok := raw["name"]; !ok || v == nil {
+		return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
 	}
-	type Plain SpecDistributionModulesAuthProviderBasicAuth
+	type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+	*j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
 	return nil
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["host"]; !ok || v == nil {
-		return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
+	if v, ok := raw["dr"]; !ok || v == nil {
+		return fmt.Errorf("field dr in SpecDistributionModules: required")
 	}
-	if v, ok := raw["ingressClass"]; !ok || v == nil {
-		return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
+	if v, ok := raw["ingress"]; !ok || v == nil {
+		return fmt.Errorf("field ingress in SpecDistributionModules: required")
 	}
-	type Plain SpecDistributionModulesAuthOverridesIngress
+	if v, ok := raw["logging"]; !ok || v == nil {
+		return fmt.Errorf("field logging in SpecDistributionModules: required")
+	}
+	if v, ok := raw["policy"]; !ok || v == nil {
+		return fmt.Errorf("field policy in SpecDistributionModules: required")
+	}
+	type Plain SpecDistributionModules
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAuthOverridesIngress(plain)
+	*j = SpecDistributionModules(plain)
 	return nil
 }
 
+type TypesKubeLabels map[string]string
+
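Editor's note: per the check just above, `dr`, `ingress`, `logging` and `policy` are the keys that SpecDistributionModules itself insists on; each nested module then enforces its own required fields through its own `UnmarshalJSON`. A sketch of probing that minimal shape; the nested values are placeholders and assumptions, not documented defaults:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The four module keys must be present and non-null at this level; what
	// each nested object additionally requires (e.g. a mandatory "type") is
	// checked by that module's own unmarshaler, not reproduced here.
	doc := []byte(`{
		"dr": {"type": "none"},
		"ingress": {},
		"logging": {"type": "none"},
		"policy": {"type": "none"}
	}`)
	var raw map[string]interface{}
	if err := json.Unmarshal(doc, &raw); err != nil {
		panic(err)
	}
	for _, field := range []string{"dr", "ingress", "logging", "policy"} {
		v, ok := raw[field]
		fmt.Printf("%-8s present=%v non-null=%v\n", field, ok, v != nil)
	}
}
```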
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["connectors"]; !ok || v == nil {
-		return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
+	if v, ok := raw["kubeconfig"]; !ok || v == nil {
+		return fmt.Errorf("field kubeconfig in SpecDistribution: required")
+	}
+	if v, ok := raw["modules"]; !ok || v == nil {
+		return fmt.Errorf("field modules in SpecDistribution: required")
 	}
-	type Plain SpecDistributionModulesAuthDex
+	type Plain SpecDistribution
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAuthDex(plain)
+	*j = SpecDistribution(plain)
 	return nil
 }
 
+type SpecPluginsHelmReleasesElemSetElem struct {
+	// The name of the set
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+	// The value of the set
+	Value string `json:"value" yaml:"value" mapstructure:"value"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
+func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
 	if v, ok := raw["name"]; !ok || v == nil {
-		return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
+		return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required")
 	}
-	type Plain SpecDistributionCustomPatchesSecretGeneratorResource
+	if v, ok := raw["value"]; !ok || v == nil {
+		return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required")
+	}
+	type Plain SpecPluginsHelmReleasesElemSetElem
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
+	*j = SpecPluginsHelmReleasesElemSetElem(plain)
 	return nil
 }
 
+type SpecPluginsHelmReleases []struct {
+	// The chart of the release
+	Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`
+
+	// Disable running `helm diff` validation when installing the plugin; it will
+	// still be run when upgrading.
+	DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"`
+
+	// The name of the release
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+	// The namespace of the release
+	Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"`
+
+	// Set corresponds to the JSON schema field "set".
+	Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"`
+
+	// The values of the release
+	Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"`
+
+	// The version of the release
+	Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"`
+}
+
+type SpecPluginsHelmRepositories []struct {
+	// The name of the repository
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+	// The URL of the repository
+	Url string `json:"url" yaml:"url" mapstructure:"url"`
+}
+
+type SpecPluginsHelm struct {
+	// Releases corresponds to the JSON schema field "releases".
+	Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"`
+
+	// Repositories corresponds to the JSON schema field "repositories".
+	Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"`
+}
+
+type SpecPluginsKustomize []struct {
+	// The folder of the kustomize plugin
+	Folder string `json:"folder" yaml:"folder" mapstructure:"folder"`
+
+	// The name of the kustomize plugin
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+}
+
+type SpecPlugins struct {
+	// Helm corresponds to the JSON schema field "helm".
+	Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"`
+
+	// Kustomize corresponds to the JSON schema field "kustomize".
+	Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
 	var v string
 	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
 	var ok bool
-	for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior {
+	for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior {
 		if reflect.DeepEqual(v, expected) {
 			ok = true
 			break
 		}
 	}
 	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v)
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v)
 	}
-	*j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v)
+	*j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v)
 	return nil
 }
 
-var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{
-	"create",
-	"replace",
-	"merge",
-}
-
 // UnmarshalJSON implements json.Unmarshaler.
 func (j *Spec) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
@@ -2368,46 +2626,6 @@ func (j *Spec) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["name"]; !ok || v == nil {
-		return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
-	}
-	type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
-	return nil
-}
-
-type TypesKubeLabels map[string]string
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil -} - var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ "create", "replace", @@ -2474,15 +2692,39 @@ func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { return nil } -const TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - -type TypesKubeTolerationOperator string - var enumValues_TypesKubeTolerationOperator = []interface{}{ "Exists", "Equal", } +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2541,9 +2783,9 @@ func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { } const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" ) type TypesKubeTolerationOperator_1 string @@ -2616,31 +2858,22 @@ func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { return nil } -const ( - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) - return nil + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2659,19 +2892,82 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesKubeLabels_1 map[string]string + +type TypesKubeTaintsEffect string + +var enumValues_TypesKubeTaintsEffect = []interface{}{ "NoSchedule", "PreferNoSchedule", "NoExecute", } -type TypesKubeTolerationEffect string +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTaintsEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + } + *j = TypesKubeTaintsEffect(v) + return nil +} -type TypesIpAddress string +const ( + TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" + TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" + TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" +) -type TypesKubeLabels_1 map[string]string +type TypesKubeTaints struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` -type TypesKubeTaints []string + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeTaints: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeTaints: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeTaints: required") + } + type Plain TypesKubeTaints + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeTaints(plain) + return nil +} type TypesSemVer string diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 4bed7ebe0..3d0b8199b 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -6,13 +6,17 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" ) type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } +// A KFD Cluster deployed on top of a set of existing VMs. type OnpremisesKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -35,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Kubernetes corresponds to the JSON schema field "kubernetes". @@ -56,26 +62,42 @@ type SpecDistribution struct { Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided + // for core modules. + NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` + + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. 
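+	//
+	// An illustrative example, assuming a hypothetical internal mirror of the
+	// default registry:
+	//
+	// ```yaml
+	// registry: registry.internal.example.com/fury
+	// ```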
	Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`

-	// The relative path to the vendor directory, does not need to be changed
+	// The relative path to the vendor directory; it does not need to be changed.
	RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"`

-	// The tolerations that will be added to the pods for all the KFD modules
+	// An array with the tolerations that will be added to the pods for all the KFD
+	// modules. Follows Kubernetes tolerations format. Example:
+	//
+	// ```yaml
+	// - effect: NoSchedule
+	//   key: node.kubernetes.io/role
+	//   value: infra
+	// ```
	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}

type SpecDistributionCommonProvider struct {
-	// The type of the provider
+	// The provider type. Don't set. FOR INTERNAL USE ONLY.
	Type string `json:"type" yaml:"type" mapstructure:"type"`
}

@@ -274,8 +296,11 @@ type SpecDistributionModules struct {
	Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
}

+// Configuration for the Auth module.
type SpecDistributionModulesAuth struct {
-	// The base domain for the auth module
+	// The base domain for the ingresses created by the Auth module (Gangplank,
+	// Pomerium, Dex). Notice that when the ingress module type is `dual`, these will
+	// use the `external` ingress class.
	BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`

	// Dex corresponds to the JSON schema field "dex".
@@ -294,11 +319,25 @@ type SpecDistributionModulesAuth struct {
	Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
}

+// Configuration for the Dex package.
type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static client definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. Example:
+	//
+	// ```yaml
+	// additionalStaticClients:
+	//   - id: my-custom-client
+	//     name: "A custom additional static client"
+	//     redirectURIs:
+	//       - "https://myapp.tld/redirect"
+	//       - "https://alias.tld/oidc-callback"
+	//     secret: supersecretpassword
+	// ```
+	// Reference: https://dexidp.io/docs/connectors/local/
	AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`

-	// The connectors for dex
+	// A list with each item defining a Dex connector. Follows Dex connectors
+	// configuration format: https://dexidp.io/docs/connectors/
	Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`

	// Expiry corresponds to the JSON schema field "expiry".
@@ -317,54 +356,72 @@ type SpecDistributionModulesAuthDexExpiry struct {
}

type SpecDistributionModulesAuthOIDCKubernetesAuth struct {
-	// The client ID for oidc kubernetes auth
+	// The client ID that the Kubernetes API will use to authenticate against the OIDC
+	// provider (Dex).
	ClientID *string `json:"clientID,omitempty" yaml:"clientID,omitempty" mapstructure:"clientID,omitempty"`

-	// The client secret for oidc kubernetes auth
+	// The client secret that the Kubernetes API will use to authenticate against the
+	// OIDC provider (Dex).
	ClientSecret *string `json:"clientSecret,omitempty" yaml:"clientSecret,omitempty" mapstructure:"clientSecret,omitempty"`

-	// The email claim for oidc kubernetes auth
+	// DEPRECATED. Defaults to `email`.
	EmailClaim *string `json:"emailClaim,omitempty" yaml:"emailClaim,omitempty" mapstructure:"emailClaim,omitempty"`

-	// If true, oidc kubernetes auth will be enabled
+	// If true, the components needed for interacting with the Kubernetes API with
+	// OIDC authentication (Gangplank, Dex) will be deployed and configured.
	Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"`

-	// The namespace to set in the context of the kubeconfig file
+	// The namespace to set in the context of the kubeconfig file generated by
+	// Gangplank. Defaults to `default`.
	Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"`

-	// Set to true to remove the CA from the kubeconfig file
+	// Set to true to remove the CA from the kubeconfig file generated by Gangplank.
	RemoveCAFromKubeconfig *bool `json:"removeCAFromKubeconfig,omitempty" yaml:"removeCAFromKubeconfig,omitempty" mapstructure:"removeCAFromKubeconfig,omitempty"`

-	// The scopes for oidc kubernetes auth
+	// Used to specify the scope of the requested OAuth authorization by Gangplank.
+	// Defaults to: `["openid", "profile", "email", "offline_access", "groups"]`
	Scopes []string `json:"scopes,omitempty" yaml:"scopes,omitempty" mapstructure:"scopes,omitempty"`

-	// The session security key for oidc kubernetes auth
+	// The key to use for the sessions in Gangplank. Must be different for each
+	// instance of Gangplank.
	SessionSecurityKey *string `json:"sessionSecurityKey,omitempty" yaml:"sessionSecurityKey,omitempty" mapstructure:"sessionSecurityKey,omitempty"`

-	// The username claim for oidc kubernetes auth
+	// The JWT claim to use as the username. This is used in Gangplank's UI. This is
+	// combined with the clusterName for the user portion of the kubeconfig. Defaults
+	// to `nickname`.
	UsernameClaim *string `json:"usernameClaim,omitempty" yaml:"usernameClaim,omitempty" mapstructure:"usernameClaim,omitempty"`
}

+// Override the common configuration with a particular configuration for the Auth
+// module.
type SpecDistributionModulesAuthOverrides struct {
-	// Ingresses corresponds to the JSON schema field "ingresses".
-	Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
+	// Override the definition of the Auth module ingresses.
+	Ingresses *SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`

-	// The node selector to use to place the pods for the auth module
+	// Set to override the node selector used to place the pods of the Auth module.
	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

-	// The tolerations that will be added to the pods for the auth module
+	// Set to override the tolerations that will be added to the pods of the Auth
+	// module.
	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}

type SpecDistributionModulesAuthOverridesIngress struct {
-	// The host of the ingress
+	// Use this host for the ingress instead of the default one.
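+	//
+	// Example, with a hypothetical domain:
+	//
+	// ```yaml
+	// host: gangplank.example.com
+	// ```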
Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } -type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress +// Override the definition of the Auth module ingresses. +type SpecDistributionModulesAuthOverridesIngresses struct { + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthOverridesIngress `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` + + // Gangplank corresponds to the JSON schema field "gangplank". + Gangplank *SpecDistributionModulesAuthOverridesIngress `json:"gangplank,omitempty" yaml:"gangplank,omitempty" mapstructure:"gangplank,omitempty"` +} type SpecDistributionModulesAuthPomerium interface{} @@ -488,15 +545,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -508,11 +573,16 @@ const ( SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***on-premises*** + // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` + // disables the module and `on-premises` will install Velero and an optional MinIO + // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -526,18 +596,24 @@ const ( SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" ) +// Configuration for the Velero package. type SpecDistributionModulesDrVelero struct { - // The backend for velero + // The storage backend type for Velero. 
`minio` will use an in-cluster MinIO
+	// deployment for object storage; `externalEndpoint` can be used to point to an
+	// external S3-compatible object storage instead of deploying an in-cluster MinIO.
	Backend *SpecDistributionModulesDrVeleroBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`

-	// ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+	// Configuration for Velero's external storage backend.
	ExternalEndpoint *SpecDistributionModulesDrVeleroExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`

	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

-	// The retention time for velero
-	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+	// Configuration for Velero's backup schedules.
+	Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"`
+
+	// Configuration for the additional snapshotController component installation.
+	SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"`
}

type SpecDistributionModulesDrVeleroBackend string

@@ -547,30 +623,92 @@ const (
	SpecDistributionModulesDrVeleroBackendMinio SpecDistributionModulesDrVeleroBackend = "minio"
)

+// Configuration for Velero's external storage backend.
type SpecDistributionModulesDrVeleroExternalEndpoint struct {
-	// The access key id for velero backend
+	// The access key ID (username) for the external S3-compatible bucket.
	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`

-	// The bucket name for velero backend
+	// The bucket name of the external S3-compatible object storage.
	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`

-	// The endpoint for velero
+	// External S3-compatible endpoint for Velero's storage.
	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`

-	// If true, the endpoint will be insecure
+	// If true, will use HTTP as protocol instead of HTTPS.
	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`

-	// The secret access key for velero backend
+	// The secret access key (password) for the external S3-compatible bucket.
	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}

+// Configuration for Velero's backup schedules.
+type SpecDistributionModulesDrVeleroSchedules struct {
+	// Configuration for Velero schedules.
+	Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"`
+
+	// Whether or not to install the default `manifests` and `full` backup schedules.
+	// Default is `true`.
+	Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
+// Configuration for Velero schedules.
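+//
+// An illustrative sketch of the `definitions` block that simply restates the
+// documented defaults (the values are examples, not requirements):
+//
+// ```yaml
+// definitions:
+//   full:
+//     schedule: "0 1 * * *"
+//     ttl: 720h0m0s
+//   manifests:
+//     schedule: "*/15 * * * *"
+//     ttl: 720h0m0s
+// ```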
+type SpecDistributionModulesDrVeleroSchedulesDefinitions struct {
+	// Configuration for Velero's full backup schedule.
+	Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
+
+	// Configuration for Velero's manifests backup schedule.
+	Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+}
+
+// Configuration for Velero's full backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct {
+	// The cron expression for the `full` backup schedule (default `0 1 * * *`).
+	Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+	// EXPERIMENTAL (if you take more than one backup, the backups after the first
+	// are not automatically restorable, see
+	// https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for
+	// the manual restore solution): SnapshotMoveData specifies whether snapshot data
+	// should be moved. Velero will create a new volume from the snapshot and upload
+	// the content to the storageLocation.
+	SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"`
+
+	// The Time To Live (TTL) of the backups created by the backup schedules (default
+	// `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+	// created backups; prior backups will keep the old TTL.
+	Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
+
+// Configuration for Velero's manifests backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct {
+	// The cron expression for the `manifests` backup schedule (default `*/15 * * *
+	// *`).
+	Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+	// The Time To Live (TTL) of the backups created by the backup schedules (default
+	// `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+	// created backups; prior backups will keep the old TTL.
+	Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
+
+// Configuration for the additional snapshotController component installation.
+type SpecDistributionModulesDrVeleroSnapshotController struct {
+	// Whether or not to install the snapshotController component in the cluster.
+	// Before enabling this field, check whether your CSI driver already provides a
+	// built-in snapshotController.
+	Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
 type SpecDistributionModulesIngress struct {
-	// the base domain used for all the KFD ingresses, if in the nginx dual
-	// configuration, it should be the same as the
-	// .spec.distribution.modules.ingress.dns.private.name zone
+	// The base domain used for all the KFD infrastructural ingresses. If using the
+	// nginx `dual` type, this value should be the same as the domain associated with
+	// the `internal` ingress class.
	BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`

-	// CertManager corresponds to the JSON schema field "certManager".
+	// Configuration for the cert-manager package. Required even when
+	// `ingress.nginx.type` is `none`, because cert-manager is used for managing
+	// other certificates in the cluster besides the TLS termination certificates
+	// for the ingresses.
	CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"`

	// Forecastle corresponds to the JSON schema field "forecastle".
@@ -579,7 +717,7 @@ type SpecDistributionModulesIngress struct {
	// If corresponds to the JSON schema field "if".
	If interface{} `json:"if,omitempty" yaml:"if,omitempty" mapstructure:"if,omitempty"`

-	// Configurations for the nginx ingress controller module
+	// Configurations for the Ingress nginx controller package.
	Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`

	// Overrides corresponds to the JSON schema field "overrides".
@@ -589,6 +727,10 @@ type SpecDistributionModulesIngress struct {
	Then interface{} `json:"then,omitempty" yaml:"then,omitempty" mapstructure:"then,omitempty"`
}

+// Configuration for the cert-manager package. Required even when
+// `ingress.nginx.type` is `none`, because cert-manager is used for managing other
+// certificates in the cluster besides the TLS termination certificates for the
+// ingresses.
type SpecDistributionModulesIngressCertManager struct {
	// ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
	ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
@@ -597,17 +739,23 @@ type SpecDistributionModulesIngressCertManager struct {
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}

+// Configuration for the cert-manager's ACME clusterIssuer used to request
+// certificates from Let's Encrypt.
type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
-	// The email of the cluster issuer
+	// The email address to use during the certificate issuing process.
	Email string `json:"email" yaml:"email" mapstructure:"email"`

-	// The name of the cluster issuer
+	// The name of the clusterIssuer.
	Name string `json:"name" yaml:"name" mapstructure:"name"`

-	// The custom solvers configurations
+	// The list of challenge solvers to use instead of the default one for the
+	// `http01` challenge. Check [cert-manager's
+	// documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types)
+	// for examples of this field.
	Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`

-	// The type of the cluster issuer, must be ***http01***
+	// The type of the clusterIssuer. Only `http01` challenge is supported for
+	// on-premises clusters. See solvers for arbitrary configurations.
	Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
}

@@ -627,14 +775,24 @@ type SpecDistributionModulesIngressNginx struct {
	// Tls corresponds to the JSON schema field "tls".
	Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`

-	// The type of the nginx ingress controller, must be ***none***, ***single*** or
-	// ***dual***
+	// The type of the Ingress nginx controller, options are:
+	// - `none`: no ingress controller will be installed and no infrastructural
+	// ingresses will be created.
+	// - `single`: a single ingress controller with ingress class `nginx` will be
+	// installed to manage all the ingress resources; infrastructural ingresses will
+	// be created.
+	// - `dual`: two independent ingress controllers will be installed, one for the
+	// `internal` ingress class intended for private ingresses and one for the
+	// `external` ingress class intended for public ingresses. KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
}

type SpecDistributionModulesIngressNginxTLS struct {
-	// The provider of the TLS certificate, must be ***none***, ***certManager*** or
-	// ***secret***
+	// The provider of the TLS certificates for the ingresses, one of: `none`,
+	// `certManager`, or `secret`.
	Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`

	// Secret corresponds to the JSON schema field "secret".
@@ -649,15 +807,18 @@ const (
	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
)

+// Kubernetes TLS secret for the ingresses TLS certificate.
type SpecDistributionModulesIngressNginxTLSSecret struct {
-	// Ca corresponds to the JSON schema field "ca".
+	// The Certificate Authority certificate file's content. You can use the
+	// `"{file://}"` notation to get the content from a file.
	Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`

-	// The certificate file content or you can use the file notation to get the
-	// content from a file
+	// The certificate file's content. You can use the `"{file://}"` notation to
+	// get the content from a file.
	Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`

-	// Key corresponds to the JSON schema field "key".
+	// The signing key file's content. You can use the `"{file://}"` notation to
+	// get the content from a file.
	Key string `json:"key" yaml:"key" mapstructure:"key"`
}

@@ -669,14 +830,17 @@ const (
	SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
)

+// Override the common configuration with a particular configuration for the
+// Ingress module.
type SpecDistributionModulesIngressOverrides struct {
	// Ingresses corresponds to the JSON schema field "ingresses".
	Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`

-	// The node selector to use to place the pods for the ingress module
+	// Set to override the node selector used to place the pods of the Ingress module.
	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`

-	// The tolerations that will be added to the pods for the ingress module
+	// Set to override the tolerations that will be added to the pods of the Ingress
+	// module.
	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}

@@ -685,6 +849,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
	Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
}

+// Configuration for the Logging module.
type SpecDistributionModulesLogging struct {
	// Cerebro corresponds to the JSON schema field "cerebro".
	Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`

@@ -707,83 +872,104 @@ type SpecDistributionModulesLogging struct {
	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

-	// selects the logging stack. Choosing none will disable the centralized logging.
-	// Choosing opensearch will deploy and configure the Logging Operator and an
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
 	// OpenSearch cluster (can be single or triple for HA) where the logs will be
-	// stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
-	// for storage. Choosing customOuput the Logging Operator will be deployed and
-	// installed but with no local storage, you will have to create the needed Outputs
-	// and ClusterOutputs to ship the logs to your desired storage.
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+	// storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but
+	// without in-cluster storage; you will have to create the needed Outputs and
+	// ClusterOutputs to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`.
	Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
}

+// DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.
type SpecDistributionModulesLoggingCerebro struct {
	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}

-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
type SpecDistributionModulesLoggingCustomOutputs struct {
-	// This value defines where the output from Flow will be sent. Will be the `spec`
-	// section of the `Output` object. It must be a string (and not a YAML object)
-	// following the OutputSpec definition. Use the nullout output to discard the
-	// flow.
+	// This value defines where the output from the `audit` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition. Use the `nullout` output to
+	// discard the flow: `nullout: {}`
	Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`

-	// This value defines where the output from Flow will be sent. Will be the `spec`
-	// section of the `Output` object. It must be a string (and not a YAML object)
-	// following the OutputSpec definition. Use the nullout output to discard the
-	// flow.
+	// This value defines where the output from the `errors` Flow will be sent. This
+	// will be the `spec` section of the `Output` object. It must be a string (and not
+	// a YAML object) following the OutputSpec definition.
Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
+	// This value defines where the output from the `systemdEtcd` Flow will be sent.
+	// This will be the `spec` section of the `Output` object. It must be a string
+	// (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+	// output to discard the flow: `nullout: {}`
	SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"`
}

+// Configuration for the Loki package.
type SpecDistributionModulesLoggingLoki struct {
-	// Backend corresponds to the JSON schema field "backend".
+	// The storage backend type for Loki. `minio` will use an in-cluster MinIO
+	// deployment for object storage; `externalEndpoint` can be used to point to an
+	// external object storage instead of deploying an in-cluster MinIO.
	Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`

-	// ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+	// Configuration for Loki's external storage backend.
	ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`

	// Resources corresponds to the JSON schema field "resources".
	Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+	// Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the
+	// time series database it uses to store the logs from BoltDB to TSDB, and the
+	// schema from v11 to v13.
+	//
+	// The value of this field will determine the date when Loki will start writing
+	// using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB
+	// storage and schema will be kept for reading purposes until they expire.
+	//
+	// Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example:
+	// `2024-11-18`.
+	TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"`
}

type SpecDistributionModulesLoggingLokiBackend string

@@ -793,23 +979,25 @@ const (
	SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio"
)

+// Configuration for Loki's external storage backend.
type SpecDistributionModulesLoggingLokiExternalEndpoint struct {
-	// The access key id of the loki external endpoint
+	// The access key ID (username) for the external S3-compatible bucket.
	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`

-	// The bucket name of the loki external endpoint
+	// The bucket name of the external S3-compatible object storage.
	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`

-	// The endpoint of the loki external endpoint
+	// External S3-compatible endpoint for Loki's storage.
	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`

-	// If true, the loki external endpoint will be insecure
+	// If true, will use HTTP as protocol instead of HTTPS.
	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`

-	// The secret access key of the loki external endpoint
+	// The secret access key (password) for the external S3-compatible bucket.
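+	//
+	// For reference, a sketch of a complete `externalEndpoint` block; the endpoint,
+	// bucket, and credentials are hypothetical:
+	//
+	// ```yaml
+	// externalEndpoint:
+	//   endpoint: s3.example.com
+	//   bucketName: loki
+	//   accessKeyId: my-access-key
+	//   secretAccessKey: my-secret-key
+	//   insecure: false
+	// ```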
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -817,15 +1005,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -836,10 +1024,12 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -850,6 +1040,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -864,7 +1055,7 @@ const ( SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components +// Configuration for the Monitoring module. type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` @@ -893,22 +1084,23 @@ type SpecDistributionModulesMonitoring struct { // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". 
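+	//
+	// A sketch of shipping the collected metrics to a centralized Prometheus via
+	// the `remoteWrite` option (the receiver URL is hypothetical):
+	//
+	// ```yaml
+	// prometheusAgent:
+	//   remoteWrite:
+	//     - url: https://central-prometheus.example.com/api/v1/write
+	// ```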
PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. // // - `none`: will disable the whole monitoring stack. // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -916,14 +1108,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL where to send the infrastructural and workload alerts to. SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -962,17 +1155,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Mimir package. 
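+//
+// An illustrative configuration using the in-cluster MinIO backend and the
+// default retention (values are examples only):
+//
+// ```yaml
+// mimir:
+//   backend: minio
+//   retentionTime: 30d
+// ```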
type SpecDistributionModulesMonitoringMimir struct {
-	// The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
+	// The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+	// deployment for object storage; `externalEndpoint` can be used to point to an
+	// external S3-compatible object storage instead of deploying an in-cluster MinIO.
	Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`

-	// ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+	// Configuration for Mimir's external storage backend.
	ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`

	// Overrides corresponds to the JSON schema field "overrides".
	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

-	// The retention time for the mimir pods
+	// The retention time for the metrics stored in Mimir. Default is `30d`. Value
+	// must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y =
+	// 365 days.
	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
}

@@ -983,2387 +1181,2533 @@ const (
	SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
)

+// Configuration for Mimir's external storage backend.
type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
-	// The access key id of the external mimir backend
+	// The access key ID (username) for the external S3-compatible bucket.
	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`

-	// The bucket name of the external mimir backend
+	// The bucket name of the external S3-compatible object storage.
	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`

-	// The endpoint of the external mimir backend
+	// The external S3-compatible endpoint for Mimir's storage.
	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`

-	// If true, the external mimir backend will not use tls
+	// If true, will use HTTP as protocol instead of HTTPS.
	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`

-	// The secret access key of the external mimir backend
+	// The secret access key (password) for the external S3-compatible bucket.
	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}

-type SpecDistributionModulesMonitoringMinio struct {
-	// Overrides corresponds to the JSON schema field "overrides".
-	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
-	// RootUser corresponds to the JSON schema field "rootUser".
-	RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
-	// The storage size for the minio pods
-	StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil } -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil } -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the k8s Prometheus instance. - RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the k8s Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the k8s Prometheus instance. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil } -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
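
// The enum check is the complement of the required-field checks: only the
// values listed in enumValues_SpecDistributionModulesIngressNginxType survive
// decoding. A small sketch, again assuming the same generated package:

func sketchNginxTypeEnum() {
	var t SpecDistributionModulesIngressNginxType
	fmt.Println(json.Unmarshal([]byte(`"dual"`), &t)) // <nil>
	fmt.Println(json.Unmarshal([]byte(`"both"`), &t)) // invalid value (expected one of ...)
}
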
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil } -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". - Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of networking to use, either ***calico*** or ***cilium*** - Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil } -type SpecDistributionModulesNetworkingCilium struct { - // The mask size to use for the cilium pods - MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The pod cidr to use for the cilium pods - PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"` +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" -) - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. 
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` -} - -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". 
- RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external tempo backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external tempo backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external tempo backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external tempo backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -type SpecKubernetes struct { - // Advanced corresponds to the JSON schema field "advanced". - Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"` - - // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible". 
- AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"` - - // The address of the control plane - ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"` - - // The DNS zone to use for the cluster - DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"` - - // LoadBalancers corresponds to the JSON schema field "loadBalancers". - LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"` - - // Masters corresponds to the JSON schema field "masters". - Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"` - - // Nodes corresponds to the JSON schema field "nodes". - Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"` - - // The folder where the PKI will be stored - PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"` - - // The CIDR to use for the pods - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` - - // Proxy corresponds to the JSON schema field "proxy". - Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"` - - // Ssh corresponds to the JSON schema field "ssh". - Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - - // The CIDR to use for the services - SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"` -} - -type SpecKubernetesAdvanced struct { - // AirGap corresponds to the JSON schema field "airGap". - AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"` - - // Cloud corresponds to the JSON schema field "cloud". - Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"` - - // Containerd corresponds to the JSON schema field "containerd". - Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"` - - // Encryption corresponds to the JSON schema field "encryption". - Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"` - - // Oidc corresponds to the JSON schema field "oidc". - Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"` - - // URL of the registry where to pull images from for the Kubernetes phase. - // (Default is registry.sighup.io/fury/on-premises). - Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - - // Users corresponds to the JSON schema field "users". - Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` -} - -type SpecKubernetesAdvancedAirGap struct { - // The containerd download url - ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"` - - // DependenciesOverride corresponds to the JSON schema field - // "dependenciesOverride". 
- DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"` - - // The etcd download url - EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"` - - // The runc checksum - RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"` - - // The runc download url - RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"` -} - -type SpecKubernetesAdvancedAirGapDependenciesOverride struct { - // Apt corresponds to the JSON schema field "apt". - Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"` - - // Yum corresponds to the JSON schema field "yum". - Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"` -} - -type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct { - // The gpg key of the apt dependency - GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` - - // The gpg key id of the apt dependency - GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"` - - // The name of the apt dependency - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The repo of the apt dependency - Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` -} - -type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct { - // The gpg key of the yum dependency - GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` - - // If true, the gpg key check will be enabled - GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"` - - // The name of the yum dependency - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The repo of the yum dependency - Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` - - // If true, the repo gpg check will be enabled - RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"` -} - -type SpecKubernetesAdvancedAnsible struct { - // Additional config to append to the ansible.cfg file - Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` - - // The python interpreter to use - PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"` -} - -type SpecKubernetesAdvancedCloud struct { - // The cloud config to use - Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` - - // The cloud provider to use - Provider *string `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` -} - -type SpecKubernetesAdvancedContainerd struct { - // RegistryConfigs corresponds to the JSON schema field "registryConfigs". - RegistryConfigs SpecKubernetesAdvancedContainerdRegistryConfigs `json:"registryConfigs,omitempty" yaml:"registryConfigs,omitempty" mapstructure:"registryConfigs,omitempty"` -} - -// Allows specifying custom configuration for a registry at containerd level. You -// can set authentication details and mirrors for a registry. 
-// This feature can be used for example to authenticate to a private registry at -// containerd (container runtime) level, i.e. globally instead of using -// `imagePullSecrets`. It also can be used to use a mirror for a registry or to -// enable insecure connections to trusted registries that don't support TLS. -type SpecKubernetesAdvancedContainerdRegistryConfigs []struct { - // InsecureSkipVerify corresponds to the JSON schema field "insecureSkipVerify". - InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty" yaml:"insecureSkipVerify,omitempty" mapstructure:"insecureSkipVerify,omitempty"` - - // MirrorEndpoint corresponds to the JSON schema field "mirrorEndpoint". - MirrorEndpoint []string `json:"mirrorEndpoint,omitempty" yaml:"mirrorEndpoint,omitempty" mapstructure:"mirrorEndpoint,omitempty"` - - // Password corresponds to the JSON schema field "password". - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // Registry corresponds to the JSON schema field "registry". - Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - - // Username corresponds to the JSON schema field "username". - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecKubernetesAdvancedEncryption struct { - // The configuration to use - Configuration *string `json:"configuration,omitempty" yaml:"configuration,omitempty" mapstructure:"configuration,omitempty"` - - // The tls cipher suites to use - TlsCipherSuites []string `json:"tlsCipherSuites,omitempty" yaml:"tlsCipherSuites,omitempty" mapstructure:"tlsCipherSuites,omitempty"` -} - -type SpecKubernetesAdvancedOIDC struct { - // The ca file of the oidc provider - CaFile *string `json:"ca_file,omitempty" yaml:"ca_file,omitempty" mapstructure:"ca_file,omitempty"` - - // The client id of the oidc provider - ClientId *string `json:"client_id,omitempty" yaml:"client_id,omitempty" mapstructure:"client_id,omitempty"` - - // GroupPrefix corresponds to the JSON schema field "group_prefix". - GroupPrefix *string `json:"group_prefix,omitempty" yaml:"group_prefix,omitempty" mapstructure:"group_prefix,omitempty"` - - // GroupsClaim corresponds to the JSON schema field "groups_claim". - GroupsClaim *string `json:"groups_claim,omitempty" yaml:"groups_claim,omitempty" mapstructure:"groups_claim,omitempty"` - - // The issuer url of the oidc provider - IssuerUrl *string `json:"issuer_url,omitempty" yaml:"issuer_url,omitempty" mapstructure:"issuer_url,omitempty"` - - // UsernameClaim corresponds to the JSON schema field "username_claim". - UsernameClaim *string `json:"username_claim,omitempty" yaml:"username_claim,omitempty" mapstructure:"username_claim,omitempty"` - - // UsernamePrefix corresponds to the JSON schema field "username_prefix". 
- UsernamePrefix *string `json:"username_prefix,omitempty" yaml:"username_prefix,omitempty" mapstructure:"username_prefix,omitempty"` -} - -type SpecKubernetesAdvancedUsers struct { - // The names of the users - Names []string `json:"names,omitempty" yaml:"names,omitempty" mapstructure:"names,omitempty"` - - // The org of the users - Org *string `json:"org,omitempty" yaml:"org,omitempty" mapstructure:"org,omitempty"` -} - -type SpecKubernetesLoadBalancers struct { - // The additional config to use - AdditionalConfig *string `json:"additionalConfig,omitempty" yaml:"additionalConfig,omitempty" mapstructure:"additionalConfig,omitempty"` - - // If true, the load balancers will be enabled - Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` - - // Hosts corresponds to the JSON schema field "hosts". - Hosts []SpecKubernetesLoadBalancersHost `json:"hosts,omitempty" yaml:"hosts,omitempty" mapstructure:"hosts,omitempty"` - - // Keepalived corresponds to the JSON schema field "keepalived". - Keepalived *SpecKubernetesLoadBalancersKeepalived `json:"keepalived,omitempty" yaml:"keepalived,omitempty" mapstructure:"keepalived,omitempty"` - - // Stats corresponds to the JSON schema field "stats". - Stats *SpecKubernetesLoadBalancersStats `json:"stats,omitempty" yaml:"stats,omitempty" mapstructure:"stats,omitempty"` -} - -type SpecKubernetesLoadBalancersHost struct { - // The IP of the host - Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` - - // The name of the host - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type SpecKubernetesLoadBalancersKeepalived struct { - // If true, keepalived will be enabled - Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` - - // The interface to use - Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"` - - // The IP to use - Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"` - - // The passphrase to use - Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"` - - // The virtual router ID to use - VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"` -} - -type SpecKubernetesLoadBalancersStats struct { - // The password to use - Password string `json:"password" yaml:"password" mapstructure:"password"` - - // The username to use - Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecKubernetesMasters struct { - // Hosts corresponds to the JSON schema field "hosts". - Hosts []SpecKubernetesMastersHost `json:"hosts" yaml:"hosts" mapstructure:"hosts"` -} - -type SpecKubernetesMastersHost struct { - // The IP of the host - Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` - - // The name of the host - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type SpecKubernetesNodes []SpecKubernetesNodesNode - -type SpecKubernetesNodesNode struct { - // Hosts corresponds to the JSON schema field "hosts". - Hosts []SpecKubernetesNodesNodeHost `json:"hosts" yaml:"hosts" mapstructure:"hosts"` - - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Taints corresponds to the JSON schema field "taints". 
- Taints []TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` -} - -type SpecKubernetesNodesNodeHost struct { - // Ip corresponds to the JSON schema field "ip". - Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` - - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type SpecKubernetesProxy struct { - // The HTTP proxy to use - Http *TypesUri `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` - - // The HTTPS proxy to use - Https *TypesUri `json:"https,omitempty" yaml:"https,omitempty" mapstructure:"https,omitempty"` - - // The no proxy to use - NoProxy *string `json:"noProxy,omitempty" yaml:"noProxy,omitempty" mapstructure:"noProxy,omitempty"` -} - -type SpecKubernetesSSH struct { - // The path to the private key to use to connect to the nodes - KeyPath string `json:"keyPath" yaml:"keyPath" mapstructure:"keyPath"` - - // The username to use to connect to the nodes - Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} - -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". 
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the minio module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". 
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the tracing module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the policy module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesIpAddress string - -type TypesKubeLabels map[string]string - -type TypesKubeLabels_1 map[string]string - -type TypesKubeNodeSelector map[string]string - -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the prometheus pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeResourcesRequests struct { - // The cpu request for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the prometheus pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeTaints struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Value corresponds to the JSON schema field "value". - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type TypesKubeTaintsEffect string - -const ( - TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" - TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" - TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" -) - -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -type TypesKubeTolerationEffect string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" -) - -type TypesKubeTolerationEffect_1 string - -const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" -) - -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - -type TypesKubeTolerationOperator_1 string - -const ( - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesTcpPort int - -type TypesUri string - -var enumValues_OnpremisesKfdV1Alpha2Kind = []interface{}{ - "OnPremises", -} - -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "on-premises", -} - -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "http01", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodesNodeHost) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ip"]; !ok || v == nil { - return fmt.Errorf("field ip in SpecKubernetesNodesNodeHost: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodesNodeHost: required") - } - type Plain SpecKubernetesNodesNodeHost - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodesNodeHost(plain) - return nil -} - -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } // UnmarshalJSON implements 
json.Unmarshaler. -func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", "none", - "tempo", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) - } - *j = SpecDistributionModulesLoggingLokiBackend(v) - return nil +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
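
// A short sketch tying the three resource types above together, e.g. for a
// Prometheus `resources` block, using the example values from the field
// comments (requests 500m/500M, limits 1000m/1G). Reuses the illustrative
// ptr helper from the first sketch.

func sketchKubeResources() {
	res := TypesKubeResources{
		Requests: &TypesKubeResourcesRequests{Cpu: ptr("500m"), Memory: ptr("500M")},
		Limits:   &TypesKubeResourcesLimits{Cpu: ptr("1000m"), Memory: ptr("1G")},
	}
	out, _ := json.Marshal(res)
	fmt.Println(string(out)) // {"limits":{"cpu":"1000m","memory":"1G"},"requests":{"cpu":"500m","memory":"500M"}}
}
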
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecDistributionModulesIngress + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ip"]; !ok || v == nil { - return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required") + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required") + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecKubernetesLoadBalancersHost + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesLoadBalancersHost(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enabled"]; !ok || v == nil { - return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecKubernetesLoadBalancersKeepalived + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesLoadBalancersKeepalived(plain) + *j = SpecDistributionModulesDr(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecDistributionModulesDrVeleroBackend(v) return nil } +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLoadBalancersStats) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecKubernetesLoadBalancersStats: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesLoadBalancersStats: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecKubernetesLoadBalancersStats + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesLoadBalancersStats(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecDistributionModulesLoggingType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesMastersHost) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ip"]; !ok || v == nil { - return fmt.Errorf("field ip in SpecKubernetesMastersHost: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesMastersHost: required") + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain SpecKubernetesMastersHost + *j = SpecDistributionModulesAuth(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
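
// Sketch of the override knobs defined in TypesFuryModuleOverrides above:
// pin a module's pods to a set of nodes and expose one of its ingresses on a
// custom host with authentication disabled. The node label and the "grafana"
// ingress key are illustrative only; ptr is the helper from the first sketch.

func sketchModuleOverrides() {
	ov := TypesFuryModuleOverrides{
		NodeSelector: TypesKubeNodeSelector{"node.kubernetes.io/role": "infra"},
		Ingresses: TypesFuryModuleOverridesIngresses{
			"grafana": {
				Host:        ptr("grafana.internal.example.com"),
				DisableAuth: ptr(true), // skip SSO/Basic Auth for this ingress only
			},
		},
	}
	_ = ov
}
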
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesMastersHost(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesMasters) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["hosts"]; !ok || v == nil { - return fmt.Errorf("field hosts in SpecKubernetesMasters: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesMasters - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecKubernetesMasters(plain) + *j = SpecDistributionModulesAuthProviderType(v) return nil } +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + // UnmarshalJSON implements json.Unmarshaler. 
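+// Minimal sketch, assuming the provider object exposes a `basicAuth` field as
+// in the public schema (values are placeholders): when the provider type is
+// `basicAuth`, the credentials object must carry both required fields, e.g.:
+//
+//	{"type": "basicAuth", "basicAuth": {"username": "admin", "password": "secret"}}
+//
+// The unmarshaler below rejects a basicAuth object missing `username` or
+// `password`.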
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesIngressNginxTLS + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesAuthOverridesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } -var enumValues_TypesKubeTaintsEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTaintsEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["enabled"]; !ok || v == nil { + return fmt.Errorf("field enabled in SpecDistributionModulesAuthOIDCKubernetesAuth: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + type Plain SpecDistributionModulesAuthOIDCKubernetesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesKubeTaintsEffect(v) + *j = SpecDistributionModulesAuthOIDCKubernetesAuth(plain) return nil } +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecDistributionModulesAuthDex(plain) return nil } -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. 
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeTaints: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeTaints: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeTaints: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain TypesKubeTaints + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeTaints(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideApt) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["gpg_key"]; !ok || v == nil { - return fmt.Errorf("field gpg_key in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") - } - if v, ok := raw["gpg_key_id"]; !ok || v == nil { - return fmt.Errorf("field gpg_key_id in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") - } - if v, ok := raw["repo"]; !ok || v == nil { - return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") - } - type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideApt - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAdvancedAirGapDependenciesOverrideApt(plain) - return nil +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the `k8s` Prometheus instance. + RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the `k8s` Prometheus instance. 
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringType string + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesPolicyKyverno(plain) + *j = SpecDistributionModulesMonitoringType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", +const ( + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", } // UnmarshalJSON implements json.Unmarshaler. 
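+// Illustrative sketch (assumes the Monitoring struct exposes `Type` and
+// `Prometheus` fields, which are not shown in this hunk; values are
+// placeholders): building a monitoring spec that ships the collected metrics
+// to a remote Prometheus receiver via remoteWrite:
+//
+//	m := SpecDistributionModulesMonitoring{
+//		Type: SpecDistributionModulesMonitoringTypePrometheus,
+//		Prometheus: &SpecDistributionModulesMonitoringPrometheus{
+//			RemoteWrite: []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem{
+//				{"url": "https://receiver.example.com/api/v1/write"},
+//			},
+//		},
+//	}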
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["clusterIssuer"]; !ok || v == nil {
-		return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
+	if v, ok := raw["type"]; !ok || v == nil {
+		return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
 	}
-	type Plain SpecDistributionModulesIngressCertManager
+	type Plain SpecDistributionModulesMonitoring
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesIngressCertManager(plain)
+	*j = SpecDistributionModulesMonitoring(plain)
 	return nil
 }

+type TypesCidr string
+
+type SpecDistributionModulesNetworkingCilium struct {
+	// The mask size to use for the Pods network on each node.
+	MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// Allows specifying a CIDR for the Pods network different from
+	// `.spec.kubernetes.podCidr`. If not set, the default is to use
+	// `.spec.kubernetes.podCidr`.
+	PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"`
+}
+
+type SpecDistributionModulesNetworkingTigeraOperator struct {
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesNetworkingType string
+
+var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
+	"calico",
+	"cilium",
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
+	var v string
+	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
-	if v, ok := raw["email"]; !ok || v == nil {
-		return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
-	}
-	if v, ok := raw["name"]; !ok || v == nil {
-		return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+	var ok bool
+	for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
+		if reflect.DeepEqual(v, expected) {
+			ok = true
+			break
+		}
 	}
-	type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
 	}
-	*j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
+	*j = SpecDistributionModulesNetworkingType(v)
 	return nil
 }

+const (
+	SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico"
+	SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium"
+)
+
+// Configuration for the Networking module.
+type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. + // Default is `calico`. + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesSSH) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["keyPath"]; !ok || v == nil { - return fmt.Errorf("field keyPath in SpecKubernetesSSH: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesSSH: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") } - type Plain SpecKubernetesSSH + type Plain SpecDistributionModulesNetworking var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesSSH(plain) + *j = SpecDistributionModulesNetworking(plain) return nil } +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +// Configuration for the Gatekeeper package. 
+type SpecDistributionModulesPolicyGatekeeper struct {
+	// This parameter adds namespaces to Gatekeeper's exemption list, so it will not
+	// enforce the constraints on them.
+	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+	// The default enforcement action to use for the included constraints. `deny` will
+	// block the admission when violations of the policies are found, `warn` will show
+	// a message to the user but will admit the violating requests, and `dryrun` won't
+	// give any feedback to the user but will log the violations.
+	EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
+
+	// Set to `false` to avoid installing the default Gatekeeper policies (constraints
+	// templates and constraints) included with the distribution.
+	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetes) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["controlPlaneAddress"]; !ok || v == nil {
-		return fmt.Errorf("field controlPlaneAddress in SpecKubernetes: required")
-	}
-	if v, ok := raw["dnsZone"]; !ok || v == nil {
-		return fmt.Errorf("field dnsZone in SpecKubernetes: required")
-	}
-	if v, ok := raw["loadBalancers"]; !ok || v == nil {
-		return fmt.Errorf("field loadBalancers in SpecKubernetes: required")
-	}
-	if v, ok := raw["masters"]; !ok || v == nil {
-		return fmt.Errorf("field masters in SpecKubernetes: required")
-	}
-	if v, ok := raw["nodes"]; !ok || v == nil {
-		return fmt.Errorf("field nodes in SpecKubernetes: required")
-	}
-	if v, ok := raw["pkiFolder"]; !ok || v == nil {
-		return fmt.Errorf("field pkiFolder in SpecKubernetes: required")
-	}
-	if v, ok := raw["podCidr"]; !ok || v == nil {
-		return fmt.Errorf("field podCidr in SpecKubernetes: required")
-	}
-	if v, ok := raw["ssh"]; !ok || v == nil {
-		return fmt.Errorf("field ssh in SpecKubernetes: required")
+	if v, ok := raw["enforcementAction"]; !ok || v == nil {
+		return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
 	}
-	if v, ok := raw["svcCidr"]; !ok || v == nil {
-		return fmt.Errorf("field svcCidr in SpecKubernetes: required")
+	if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+		return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
 	}
-	type Plain SpecKubernetes
+	type Plain SpecDistributionModulesPolicyGatekeeper
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecKubernetes(plain)
+	*j = SpecDistributionModulesPolicyGatekeeper(plain)
 	return nil
 }

+type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
+
 var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
-	"audit",
-	"enforce",
+	"Audit",
+	"Enforce",
 }

 // UnmarshalJSON implements json.Unmarshaler.
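+// Note the case change above: the accepted values are now `Audit` and
+// `Enforce`, matching the capitalization Kyverno expects, and the enum check
+// below is case-sensitive. Illustrative sketch:
+//
+//	var a SpecDistributionModulesPolicyKyvernoValidationFailureAction
+//	_ = json.Unmarshal([]byte(`"Audit"`), &a)    // ok
+//	err := json.Unmarshal([]byte(`"audit"`), &a) // error: invalid value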
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+	var v string
+	if err := json.Unmarshal(b, &v); err != nil {
 		return err
 	}
-	if v, ok := raw["name"]; !ok || v == nil {
-		return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required")
-	}
-	if v, ok := raw["value"]; !ok || v == nil {
-		return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required")
+	var ok bool
+	for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+		if reflect.DeepEqual(v, expected) {
+			ok = true
+			break
+		}
 	}
-	type Plain SpecPluginsHelmReleasesElemSetElem
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
+	if !ok {
+		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
 	}
-	*j = SpecPluginsHelmReleasesElemSetElem(plain)
+	*j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
 	return nil
 }

-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
-	}
-	type Plain SpecDistributionModulesLoggingOpensearch
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesLoggingOpensearch(plain)
-	return nil
+const (
+	SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit   SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
+	SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
+)
+
+// Configuration for the Kyverno package.
+type SpecDistributionModulesPolicyKyverno struct {
+	// This parameter adds namespaces to Kyverno's exemption list, so it will not
+	// enforce the policies on them.
+	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+	// Set to `false` to avoid installing the default Kyverno policies included with
+	// the distribution.
+	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The validation failure action to use for the policies: `Enforce` will block
+	// requests that do not comply with the policies, while `Audit` will not block
+	// them but will log the violations.
+	ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
 }

 // UnmarshalJSON implements json.Unmarshaler.
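+// Hypothetical minimal Kyverno object (values are placeholders): both
+// required fields must be present for the unmarshaler below to accept it:
+//
+//	{"installDefaultPolicies": true, "validationFailureAction": "Audit"}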
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } +type SpecDistributionModulesPolicyType string + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesPolicyType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodesNode) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["hosts"]; !ok || v == nil { - return fmt.Errorf("field hosts in SpecKubernetesNodesNode: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodesNode: required") - } - type Plain SpecKubernetesNodesNode - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if plain.Hosts != nil && len(plain.Hosts) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "hosts", 1) - } - *j = SpecKubernetesNodesNode(plain) - return nil +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) + +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". 
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesTracing + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLoadBalancers) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enabled"]; !ok || v == nil { - return fmt.Errorf("field enabled in SpecKubernetesLoadBalancers: required") - } - type Plain SpecKubernetesLoadBalancers - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesLoadBalancers(plain) - return nil +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Tracing's MinIO deployment. +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. 
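+// Sketch (illustrative): `minio` keeps traces in the in-cluster MinIO
+// deployment, while `externalEndpoint` points Tempo to an external
+// S3-compatible bucket; anything else fails the enum check below:
+//
+//	var b SpecDistributionModulesTracingTempoBackend
+//	err := json.Unmarshal([]byte(`"s3"`), &b) // error: expected "minio" or "externalEndpoint"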
-func (j *Spec) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - type Plain Spec - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = Spec(plain) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) + +// Configuration for Tempo's external storage backend. +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The external S3-compatible endpoint for Tempo's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. 
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesIngressNginx(plain) return nil } +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain TypesKubeToleration + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) - } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesTracing(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - *j = TypesKubeTolerationOperator(v) + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModules(plain) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} +type TypesKubeLabels map[string]string // UnmarshalJSON implements json.Unmarshaler. 
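+// Illustrative skeleton (module bodies elided, values are placeholders): a
+// distribution object must carry `modules`, and the modules object must in
+// turn carry `dr`, `ingress`, `logging` and `policy`:
+//
+//	{
+//	  "modules": {
+//	    "dr": {"type": "none"},
+//	    "ingress": { ... },
+//	    "logging": {"type": "none"},
+//	    "policy": {"type": "none"}
+//	  }
+//	}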
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionModulesDr + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecDistribution(plain) return nil } +type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct { + // URL where to download the GPG key of the Apt repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // The GPG key ID of the Apt repository. Example: + // `36A1D7869245C8950F966E92D8576A8BA88D21E9` + GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"` + + // An indicative name for the Apt repository. Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // A source string for the new Apt repository. Example: `deb + // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideApt) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") + if v, ok := raw["gpg_key"]; !ok || v == nil { + return fmt.Errorf("field gpg_key in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") } - type Plain SpecDistributionModulesNetworking + if v, ok := raw["gpg_key_id"]; !ok || v == nil { + return fmt.Errorf("field gpg_key_id in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") + } + if v, ok := raw["repo"]; !ok || v == nil { + return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") + } + type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideApt var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworking(plain) + *j = SpecKubernetesAdvancedAirGapDependenciesOverrideApt(plain) return nil } +type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct { + // URL where to download the ASCII-armored GPG key of the Yum repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // If true, the GPG signature check on the packages will be enabled. + GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"` + + // An indicative name for the Yum repository. Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // URL to the directory where the Yum repository's `repodata` directory lives. 
+ // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` + + // If true, the GPG signature check on the `repodata` will be enabled. + RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["gpg_key"]; !ok || v == nil { + return fmt.Errorf("field gpg_key in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + if v, ok := raw["gpg_key_check"]; !ok || v == nil { + return fmt.Errorf("field gpg_key_check in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") } - *j = SpecDistributionModulesDrVeleroBackend(v) + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + if v, ok := raw["repo"]; !ok || v == nil { + return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + if v, ok := raw["repo_gpg_check"]; !ok || v == nil { + return fmt.Errorf("field repo_gpg_check in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideYum + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAdvancedAirGapDependenciesOverrideYum(plain) return nil } +type SpecKubernetesAdvancedAirGapDependenciesOverride struct { + // Apt corresponds to the JSON schema field "apt". + Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"` + + // Yum corresponds to the JSON schema field "yum". + Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"` +} + +// Advanced configuration for air-gapped installations. Allows setting custom URLs +// where to download the binaries dependencies from and custom .deb and .rpm +// package repositories. +type SpecKubernetesAdvancedAirGap struct { + // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should + // be as the one downloaded from containerd GitHub releases page. + ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"` + + // DependenciesOverride corresponds to the JSON schema field + // "dependenciesOverride". + DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"` + + // URL to the path where the etcd `tar.gz`s are available. 
etcd will be downloaded
+	// from
+	// `//etcd--linux-.tar.gz`
+	EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"`
+
+	// Checksum for the runc binary.
+	RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"`
+
+	// URL where to download the runc binary from.
+	RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"`
+}
+
+type SpecKubernetesAdvancedCloud struct {
+	// Sets the cloud config for the Kubelet.
+	Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+	// Sets the cloud provider for the Kubelet.
+	Provider *string `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"`
+}
+
+// Allows specifying custom configuration for a registry at the containerd level. You
+// can set authentication details and mirrors for a registry.
+// This feature can be used, for example, to authenticate to a private registry at
+// the containerd (container runtime) level, i.e. globally instead of using
+// `imagePullSecrets`. It can also be used to configure a mirror for a registry or to
+// enable insecure connections to trusted registries that have self-signed
+// certificates.
+type SpecKubernetesAdvancedContainerdRegistryConfigs []struct {
+	// Set to `true` to skip TLS verification (e.g. when using self-signed
+	// certificates).
+	InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty" yaml:"insecureSkipVerify,omitempty" mapstructure:"insecureSkipVerify,omitempty"`
+
+	// Array of URLs with the mirrors to use for the registry. Example:
+	// `["http://mymirror.tld:8080"]`
+	MirrorEndpoint []string `json:"mirrorEndpoint,omitempty" yaml:"mirrorEndpoint,omitempty" mapstructure:"mirrorEndpoint,omitempty"`
+
+	// The password containerd will use to authenticate against the registry.
+	Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+	// Registry address on which you would like to configure authentication or
+	// mirror(s). Example: `myregistry.tld:5000`
+	Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
+
+	// The username containerd will use to authenticate against the registry.
+	Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Advanced configuration for containerd.
+type SpecKubernetesAdvancedContainerd struct {
+	// RegistryConfigs corresponds to the JSON schema field "registryConfigs".
+	RegistryConfigs SpecKubernetesAdvancedContainerdRegistryConfigs `json:"registryConfigs,omitempty" yaml:"registryConfigs,omitempty" mapstructure:"registryConfigs,omitempty"`
+}
+
+type SpecKubernetesAdvancedEncryption struct {
+	// etcd's encryption at rest configuration. Must be a string with the
+	// EncryptionConfiguration object in YAML. Example:
+	//
+	// ```yaml
+	//
+	// apiVersion: apiserver.config.k8s.io/v1
+	// kind: EncryptionConfiguration
+	// resources:
+	//   - resources:
+	//       - secrets
+	//     providers:
+	//       - aescbc:
+	//           keys:
+	//             - name: mykey
+	//               secret: base64_encoded_secret
+	// ```
+	//
+	Configuration *string `json:"configuration,omitempty" yaml:"configuration,omitempty" mapstructure:"configuration,omitempty"`
+
+	// The TLS cipher suites to use for etcd, kubelet, and kubeadm static pods.
+ // Example: + // ```yaml + // tlsCipherSuites: + // - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + // - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + // - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + // - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + // - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + // - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + // - "TLS_AES_128_GCM_SHA256" + // - "TLS_AES_256_GCM_SHA384" + // - "TLS_CHACHA20_POLY1305_SHA256" + // ``` + TlsCipherSuites []string `json:"tlsCipherSuites,omitempty" yaml:"tlsCipherSuites,omitempty" mapstructure:"tlsCipherSuites,omitempty"` +} + +// OIDC configuration for the Kubernetes API server. +type SpecKubernetesAdvancedOIDC struct { + // The path to the certificate for the CA that signed the identity provider's web + // certificate. Defaults to the host's root CAs. This should be a path available + // to the API Server. + CaFile *string `json:"ca_file,omitempty" yaml:"ca_file,omitempty" mapstructure:"ca_file,omitempty"` + + // The client ID the API server will use to authenticate to the OIDC provider. + ClientId *string `json:"client_id,omitempty" yaml:"client_id,omitempty" mapstructure:"client_id,omitempty"` + + // Prefix prepended to group claims to prevent clashes with existing names (such + // as system: groups). + GroupPrefix *string `json:"group_prefix,omitempty" yaml:"group_prefix,omitempty" mapstructure:"group_prefix,omitempty"` + + // JWT claim to use as the user's group. + GroupsClaim *string `json:"groups_claim,omitempty" yaml:"groups_claim,omitempty" mapstructure:"groups_claim,omitempty"` + + // The issuer URL of the OIDC provider. + IssuerUrl *string `json:"issuer_url,omitempty" yaml:"issuer_url,omitempty" mapstructure:"issuer_url,omitempty"` + + // JWT claim to use as the user name. The default value is `sub`, which is + // expected to be a unique identifier of the end user. + UsernameClaim *string `json:"username_claim,omitempty" yaml:"username_claim,omitempty" mapstructure:"username_claim,omitempty"` + + // Prefix prepended to username claims to prevent clashes with existing names + // (such as system: users). + UsernamePrefix *string `json:"username_prefix,omitempty" yaml:"username_prefix,omitempty" mapstructure:"username_prefix,omitempty"` +} + +type SpecKubernetesAdvancedUsers struct { + // List of user names to create and get a kubeconfig file. Users will not have any + // permissions by default, RBAC setup for the new users is needed. + Names []string `json:"names,omitempty" yaml:"names,omitempty" mapstructure:"names,omitempty"` + + // The organization the users belong to. + Org *string `json:"org,omitempty" yaml:"org,omitempty" mapstructure:"org,omitempty"` +} + +type SpecKubernetesAdvanced struct { + // AirGap corresponds to the JSON schema field "airGap". + AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"` + + // Cloud corresponds to the JSON schema field "cloud". + Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"` + + // Containerd corresponds to the JSON schema field "containerd". + Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"` + + // Encryption corresponds to the JSON schema field "encryption". 
+	Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"`
+
+	// Oidc corresponds to the JSON schema field "oidc".
+	Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"`
+
+	// URL of the registry where to pull images from for the Kubernetes phase.
+	// (Default is registry.sighup.io/fury/on-premises).
+	Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
+
+	// Users corresponds to the JSON schema field "users".
+	Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
+}
+
+type SpecKubernetesAdvancedAnsible struct {
+	// Additional configuration to append to the ansible.cfg file
+	Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+	// The Python interpreter to use for running Ansible. Example: python3
+	PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"`
+}
+
+type SpecKubernetesLoadBalancersHost struct {
+	// The IP address of the host.
+	Ip string `json:"ip" yaml:"ip" mapstructure:"ip"`
+
+	// A name to identify the host. This value will be concatenated to
+	// `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as
+	// `<name>.<dnsZone>`.
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["dr"]; !ok || v == nil {
-		return fmt.Errorf("field dr in SpecDistributionModules: required")
-	}
-	if v, ok := raw["ingress"]; !ok || v == nil {
-		return fmt.Errorf("field ingress in SpecDistributionModules: required")
-	}
-	if v, ok := raw["logging"]; !ok || v == nil {
-		return fmt.Errorf("field logging in SpecDistributionModules: required")
+	if v, ok := raw["ip"]; !ok || v == nil {
+		return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required")
 	}
-	if v, ok := raw["policy"]; !ok || v == nil {
-		return fmt.Errorf("field policy in SpecDistributionModules: required")
+	if v, ok := raw["name"]; !ok || v == nil {
+		return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required")
 	}
-	type Plain SpecDistributionModules
+	type Plain SpecKubernetesLoadBalancersHost
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModules(plain)
+	*j = SpecKubernetesLoadBalancersHost(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_TypesKubeTolerationEffect {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v)
-	}
-	*j = TypesKubeTolerationEffect(v)
-	return nil
+type SpecKubernetesLoadBalancersKeepalived struct {
+	// Set to `true` to install keepalived with a floating virtual IP shared between the load
+	// balancer hosts for a deployment in High Availability.
+	Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"`
+
+	// Name of the network interface where to bind the Keepalived virtual IP.
+	Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"`
+
+	// The Virtual floating IP for Keepalived
+	Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"`
+
+	// The passphrase for the Keepalived clustering.
+	Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"`
+
+	// The virtual router ID of Keepalived, must be different from other Keepalived
+	// instances in the same network.
+	VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil {
-		return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required")
-	}
-	if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil {
-		return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required")
-	}
-	if v, ok := raw["SHARED_SECRET"]; !ok || v == nil {
-		return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required")
-	}
-	if v, ok := raw["SIGNING_KEY"]; !ok || v == nil {
-		return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required")
+	if v, ok := raw["enabled"]; !ok || v == nil {
+		return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required")
 	}
-	type Plain SpecDistributionModulesAuthPomeriumSecrets
+	type Plain SpecKubernetesLoadBalancersKeepalived
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesAuthPomeriumSecrets(plain)
+	*j = SpecKubernetesLoadBalancersKeepalived(plain)
 	return nil
 }
 
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
-		return err
-	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesDrType {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
-	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
-	}
-	*j = SpecDistributionModulesDrType(v)
-	return nil
+// Configuration for HAProxy stats page. Accessible at `http://<load-balancer-host>:1936/stats`
+type SpecKubernetesLoadBalancersStats struct {
+	// The basic-auth password for HAProxy's stats page.
+	Password string `json:"password" yaml:"password" mapstructure:"password"`
+
+	// The basic-auth username for HAProxy's stats page
+	Username string `json:"username" yaml:"username" mapstructure:"username"`
 }
 
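For reference, the `SpecKubernetesAdvanced*` types above map to the `.spec.kubernetes.advanced` block of an OnPremises configuration file. A minimal sketch, where all URLs, registry addresses, and credentials are illustrative placeholders rather than defaults:

```yaml
advanced:
  airGap:
    containerdDownloadUrl: https://mirror.internal.example.dev/containerd/containerd-1.7.0-linux-amd64.tar.gz
    runcDownloadUrl: https://mirror.internal.example.dev/runc/runc.amd64
    dependenciesOverride:
      yum:
        # all five fields are required by the generated validation
        name: kubernetes
        repo: https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
        gpg_key: https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
        gpg_key_check: true
        repo_gpg_check: true
  containerd:
    registryConfigs:
      - registry: myregistry.tld:5000
        username: registryuser
        password: registrypassword
        mirrorEndpoint:
          - http://mymirror.tld:8080
```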
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecKubernetesLoadBalancersStats) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	var ok bool
-	for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
+	if v, ok := raw["password"]; !ok || v == nil {
+		return fmt.Errorf("field password in SpecKubernetesLoadBalancersStats: required")
 	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
+	if v, ok := raw["username"]; !ok || v == nil {
+		return fmt.Errorf("field username in SpecKubernetesLoadBalancersStats: required")
 	}
-	*j = SpecDistributionModulesNetworkingType(v)
+	type Plain SpecKubernetesLoadBalancersStats
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
+	}
+	*j = SpecKubernetesLoadBalancersStats(plain)
 	return nil
 }
 
-var enumValues_TypesKubeTolerationEffect_1 = []interface{}{
-	"NoSchedule",
-	"PreferNoSchedule",
-	"NoExecute",
+type SpecKubernetesLoadBalancers struct {
+	// Additional configuration to append to HAProxy's configuration file.
+	AdditionalConfig *string `json:"additionalConfig,omitempty" yaml:"additionalConfig,omitempty" mapstructure:"additionalConfig,omitempty"`
+
+	// Set to `true` to install HAProxy and configure it as a load balancer on the
+	// load balancer hosts.
+	Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"`
+
+	// Hosts corresponds to the JSON schema field "hosts".
+	Hosts []SpecKubernetesLoadBalancersHost `json:"hosts,omitempty" yaml:"hosts,omitempty" mapstructure:"hosts,omitempty"`
+
+	// Keepalived corresponds to the JSON schema field "keepalived".
+	Keepalived *SpecKubernetesLoadBalancersKeepalived `json:"keepalived,omitempty" yaml:"keepalived,omitempty" mapstructure:"keepalived,omitempty"`
+
+	// Stats corresponds to the JSON schema field "stats".
+	Stats *SpecKubernetesLoadBalancersStats `json:"stats,omitempty" yaml:"stats,omitempty" mapstructure:"stats,omitempty"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error {
-	var v string
-	if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecKubernetesLoadBalancers) UnmarshalJSON(b []byte) error {
+	var raw map[string]interface{}
+	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	var ok bool
-	for _, expected := range enumValues_TypesKubeTolerationEffect_1 {
-		if reflect.DeepEqual(v, expected) {
-			ok = true
-			break
-		}
+	if v, ok := raw["enabled"]; !ok || v == nil {
+		return fmt.Errorf("field enabled in SpecKubernetesLoadBalancers: required")
 	}
-	if !ok {
-		return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v)
+	type Plain SpecKubernetesLoadBalancers
+	var plain Plain
+	if err := json.Unmarshal(b, &plain); err != nil {
+		return err
 	}
-	*j = TypesKubeTolerationEffect_1(v)
+	*j = SpecKubernetesLoadBalancers(plain)
 	return nil
 }
 
-var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
-	"calico",
-	"cilium",
+type SpecKubernetesMastersHost struct {
+	// The IP address of the host
+	Ip string `json:"ip" yaml:"ip" mapstructure:"ip"`
+
+	// A name to identify the host.
This value will be concatenated to
+	// `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as
+	// `<name>.<dnsZone>`.
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesMastersHost) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["modules"]; !ok || v == nil {
-		return fmt.Errorf("field modules in SpecDistribution: required")
+	if v, ok := raw["ip"]; !ok || v == nil {
+		return fmt.Errorf("field ip in SpecKubernetesMastersHost: required")
 	}
-	type Plain SpecDistribution
+	if v, ok := raw["name"]; !ok || v == nil {
+		return fmt.Errorf("field name in SpecKubernetesMastersHost: required")
+	}
+	type Plain SpecKubernetesMastersHost
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistribution(plain)
+	*j = SpecKubernetesMastersHost(plain)
 	return nil
 }
 
+// Configuration for the control plane hosts
+type SpecKubernetesMasters struct {
+	// Hosts corresponds to the JSON schema field "hosts".
+	Hosts []SpecKubernetesMastersHost `json:"hosts" yaml:"hosts" mapstructure:"hosts"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesMasters) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["type"]; !ok || v == nil {
-		return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
+	if v, ok := raw["hosts"]; !ok || v == nil {
+		return fmt.Errorf("field hosts in SpecKubernetesMasters: required")
 	}
-	type Plain SpecDistributionModulesLogging
+	type Plain SpecKubernetesMasters
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesLogging(plain)
+	*j = SpecKubernetesMasters(plain)
 	return nil
 }
 
+type SpecKubernetesNodesNodeHost struct {
+	// The IP address of the host
+	Ip string `json:"ip" yaml:"ip" mapstructure:"ip"`
+
+	// A name to identify the host. This value will be concatenated to
+	// `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as
+	// `<name>.<dnsZone>`.
+	Name string `json:"name" yaml:"name" mapstructure:"name"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
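Putting the load balancer and control-plane types together, a hypothetical `.spec.kubernetes` fragment could look like the sketch below. Names, IPs, and credentials are placeholders; with `dnsZone: internal.example.dev`, the host `master1` would get the FQDN `master1.internal.example.dev`:

```yaml
loadBalancers:
  enabled: true
  hosts:
    - name: haproxy1
      ip: 192.168.1.10
    - name: haproxy2
      ip: 192.168.1.11
  keepalived:
    enabled: true
    interface: eth0
    ip: 192.168.1.100/24
    virtualRouterId: "201"
    passphrase: "b16cf69a"
  stats:
    username: admin
    password: password
masters:
  hosts:
    - name: master1
      ip: 192.168.1.20
    - name: master2
      ip: 192.168.1.21
    - name: master3
      ip: 192.168.1.22
```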
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodesNodeHost) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["ip"]; !ok || v == nil { + return fmt.Errorf("field ip in SpecKubernetesNodesNodeHost: required") } - type Plain SpecDistributionModulesMonitoring + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodesNodeHost: required") + } + type Plain SpecKubernetesNodesNodeHost var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecKubernetesNodesNodeHost(plain) return nil } -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", +type TypesKubeTaintsEffect string + +var enumValues_TypesKubeTaintsEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_TypesKubeTaintsEffect { if reflect.DeepEqual(v, expected) { ok = true break } } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + } + *j = TypesKubeTaintsEffect(v) + return nil +} + +const ( + TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" + TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" + TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" +) + +type TypesKubeTaints struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeTaints: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeTaints: required") } - *j = TypesKubeTolerationOperator_1(v) + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeTaints: required") + } + type Plain TypesKubeTaints + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeTaints(plain) return nil } +type SpecKubernetesNodesNode struct { + // Hosts corresponds to the JSON schema field "hosts". + Hosts []SpecKubernetesNodesNodeHost `json:"hosts" yaml:"hosts" mapstructure:"hosts"` + + // Name for the node group. It will be also used as the node role label. 
It should + // follow the [valid variable names + // guideline](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#valid-variable-names) + // from Ansible. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Taints corresponds to the JSON schema field "taints". + Taints []TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodesNode) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["gpg_key"]; !ok || v == nil { - return fmt.Errorf("field gpg_key in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") - } - if v, ok := raw["gpg_key_check"]; !ok || v == nil { - return fmt.Errorf("field gpg_key_check in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + if v, ok := raw["hosts"]; !ok || v == nil { + return fmt.Errorf("field hosts in SpecKubernetesNodesNode: required") } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") - } - if v, ok := raw["repo"]; !ok || v == nil { - return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") - } - if v, ok := raw["repo_gpg_check"]; !ok || v == nil { - return fmt.Errorf("field repo_gpg_check in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + return fmt.Errorf("field name in SpecKubernetesNodesNode: required") } - type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideYum + type Plain SpecKubernetesNodesNode var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAdvancedAirGapDependenciesOverrideYum(plain) + if plain.Hosts != nil && len(plain.Hosts) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "hosts", 1) + } + *j = SpecKubernetesNodesNode(plain) return nil } +// Configuration for the node hosts +type SpecKubernetesNodes []SpecKubernetesNodesNode + +type TypesUri string + +type SpecKubernetesProxy struct { + // The HTTP proxy URL. Example: http://test.example.dev:3128 + Http *TypesUri `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` + + // The HTTPS proxy URL. Example: https://test.example.dev:3128 + Https *TypesUri `json:"https,omitempty" yaml:"https,omitempty" mapstructure:"https,omitempty"` + + // Comma-separated list of hosts that should not use the HTTP(S) proxy. Example: + // localhost,127.0.0.1,172.16.0.0/17,172.16.128.0/17,10.0.0.0/8,.example.dev + NoProxy *string `json:"noProxy,omitempty" yaml:"noProxy,omitempty" mapstructure:"noProxy,omitempty"` +} + +// SSH credentials to access the hosts +type SpecKubernetesSSH struct { + // The path to the private key to use to connect to the hosts + KeyPath string `json:"keyPath" yaml:"keyPath" mapstructure:"keyPath"` + + // The username to use to connect to the hosts + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + // UnmarshalJSON implements json.Unmarshaler. 
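The node-group and taint types above accept configuration like the following sketch. Group names are placeholders and, per the comment above, must be valid Ansible variable names; all three taint fields (`effect`, `key`, `value`) are required by the generated validation:

```yaml
nodes:
  - name: infra
    hosts:
      - name: infra1
        ip: 192.168.1.30
      - name: infra2
        ip: 192.168.1.31
    taints:
      - effect: NoSchedule
        key: node.kubernetes.io/role
        value: infra
  - name: worker
    hosts:
      - name: worker1
        ip: 192.168.1.40
```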
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesSSH) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["keyPath"]; !ok || v == nil { + return fmt.Errorf("field keyPath in SpecKubernetesSSH: required") } - type Plain SpecDistributionModulesAuth + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesSSH: required") + } + type Plain SpecKubernetesSSH var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecKubernetesSSH(plain) return nil } +// Defines the Kubernetes components configuration and the values needed for the +// kubernetes phase of furyctl. +type SpecKubernetes struct { + // Advanced corresponds to the JSON schema field "advanced". + Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"` + + // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible". + AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"` + + // The address for the Kubernetes control plane. Usually a DNS entry pointing to a + // Load Balancer on port 6443. + ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"` + + // The DNS zone of the machines. It will be appended to the name of each host to + // generate the `kubernetes_hostname` in the Ansible inventory file. It is also + // used to calculate etcd's initial cluster value. + DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"` + + // LoadBalancers corresponds to the JSON schema field "loadBalancers". + LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"` + + // Masters corresponds to the JSON schema field "masters". + Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"` + + // Nodes corresponds to the JSON schema field "nodes". + Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"` + + // The path to the folder where the PKI files for Kubernetes and etcd are stored. + PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"` + + // The subnet CIDR to use for the Pods network. + PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` + + // Proxy corresponds to the JSON schema field "proxy". + Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"` + + // Ssh corresponds to the JSON schema field "ssh". + Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"` + + // The subnet CIDR to use for the Services network. + SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"` +} + // UnmarshalJSON implements json.Unmarshaler. 
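The `SpecKubernetes` struct above corresponds to the whole `.spec.kubernetes` section. A sketch with the required fields filled with placeholder values (the `loadBalancers`, `masters`, and `nodes` sections follow the earlier examples; the proxy URLs reuse the examples from the doc comments):

```yaml
kubernetes:
  controlPlaneAddress: control-plane.internal.example.dev:6443
  dnsZone: internal.example.dev
  pkiFolder: ./pki
  podCidr: 172.16.128.0/17
  svcCidr: 172.16.0.0/17
  ssh:
    username: johndoe
    keyPath: ./ssh-private-key.pem
  proxy:
    http: http://test.example.dev:3128
    https: https://test.example.dev:3128
    noProxy: "localhost,127.0.0.1,172.16.0.0/17,172.16.128.0/17,10.0.0.0/8,.example.dev"
  loadBalancers:
    enabled: false
  masters:
    hosts:
      - name: master1
        ip: 192.168.1.20
  nodes:
    - name: worker
      hosts:
        - name: worker1
          ip: 192.168.1.40
```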
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["controlPlaneAddress"]; !ok || v == nil { + return fmt.Errorf("field controlPlaneAddress in SpecKubernetes: required") } - type Plain SpecDistributionModulesAuthProvider + if v, ok := raw["dnsZone"]; !ok || v == nil { + return fmt.Errorf("field dnsZone in SpecKubernetes: required") + } + if v, ok := raw["loadBalancers"]; !ok || v == nil { + return fmt.Errorf("field loadBalancers in SpecKubernetes: required") + } + if v, ok := raw["masters"]; !ok || v == nil { + return fmt.Errorf("field masters in SpecKubernetes: required") + } + if v, ok := raw["nodes"]; !ok || v == nil { + return fmt.Errorf("field nodes in SpecKubernetes: required") + } + if v, ok := raw["pkiFolder"]; !ok || v == nil { + return fmt.Errorf("field pkiFolder in SpecKubernetes: required") + } + if v, ok := raw["podCidr"]; !ok || v == nil { + return fmt.Errorf("field podCidr in SpecKubernetes: required") + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecKubernetes: required") + } + if v, ok := raw["svcCidr"]; !ok || v == nil { + return fmt.Errorf("field svcCidr in SpecKubernetes: required") + } + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecKubernetes(plain) return nil } +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain TypesKubeToleration_1 + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. 
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". + Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` +} + +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +} + +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
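The plugin types above translate to a `.spec.plugins` block. A hypothetical sketch, where the chart, repository, and folder values are illustrative only:

```yaml
plugins:
  helm:
    repositories:
      - name: prometheus-community
        url: https://prometheus-community.github.io/helm-charts
    releases:
      - name: prometheus
        chart: prometheus-community/prometheus
        version: "25.8.0"
        namespace: monitoring
        # skip `helm diff` on first install, keep it for upgrades
        disableValidationOnInstall: true
        set:
          - name: server.replicaCount
            value: "2"
        values:
          - ./values/prometheus.yaml
  kustomize:
    - name: custom-resources
      folder: ./manifests/custom-resources
```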
+func (j *Spec) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") + } + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") + } + type Plain Spec + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = TypesKubeToleration(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` } +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + // UnmarshalJSON implements json.Unmarshaler. 
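The `Spec` validation above only enforces the presence of `distribution` and `distributionVersion` (with a non-empty version string) at this level; the nested types carry their own checks. The smallest top-level skeleton that satisfies this particular check would be something like:

```yaml
spec:
  distributionVersion: v1.28.5
  distribution:
    # module configuration goes here; it is validated by the
    # SpecDistribution* types, not by the top-level Spec check
    modules: {}
```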
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = TypesKubeTolerationOperator(v) return nil } +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_TypesKubeTolerationEffect { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
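In YAML terms, `TypesKubeToleration` accepts entries like the following; `effect` and `key` are required, while `operator` (one of `Exists` or `Equal`) and `value` are optional:

```yaml
tolerations:
  - effect: NoSchedule
    key: node.kubernetes.io/role
    operator: Equal
    value: infra
  - effect: NoExecute
    key: dedicated
    operator: Exists
```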
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = TypesKubeTolerationEffect_1(v) return nil } +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + // UnmarshalJSON implements json.Unmarshaler. 
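All four Pomerium secrets are required by the validation above. A sketch of the corresponding `.spec.distribution.modules.auth.pomerium.secrets` block; the `{env://...}` notation assumes furyctl's environment-variable references (see `TypesEnvRef`), and the variable names are placeholders:

```yaml
pomerium:
  secrets:
    COOKIE_SECRET: "{env://KFD_AUTH_POMERIUM_COOKIE_SECRET}"
    IDP_CLIENT_SECRET: "{env://KFD_AUTH_POMERIUM_IDP_CLIENT_SECRET}"
    SHARED_SECRET: "{env://KFD_AUTH_POMERIUM_SHARED_SECRET}"
    SIGNING_KEY: "{env://KFD_AUTH_POMERIUM_SIGNING_KEY}"
```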
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionModulesAuthDex(plain) + *j = TypesKubeTolerationOperator_1(v) return nil } +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enabled"]; !ok || v == nil { - return fmt.Errorf("field enabled in SpecDistributionModulesAuthOIDCKubernetesAuth: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - type Plain SpecDistributionModulesAuthOIDCKubernetesAuth + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOIDCKubernetesAuth(plain) + *j = TypesKubeToleration_1(plain) return nil } +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain SpecDistributionCommonProvider + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCommonProvider(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesKubeLabels_1 map[string]string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesTcpPort int + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecDistributionCommonProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionCommonProvider(plain) return nil } +var enumValues_OnpremisesKfdV1Alpha2Kind = []interface{}{ + "OnPremises", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *OnpremisesKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { var v string @@ -3384,26 +3728,7 @@ func (j *OnpremisesKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") - } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) - return nil -} +type TypesKubeNodeSelector map[string]string // UnmarshalJSON implements json.Unmarshaler. 
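`TypesFuryModuleComponentOverrides_1`, built on the `TypesKubeNodeSelector` map just above and the `_1` toleration variant (which also requires `value`), is the shape behind the per-component `overrides` blocks. A sketch with placeholder labels:

```yaml
overrides:
  nodeSelector:
    node.kubernetes.io/role: infra
  tolerations:
    - effect: NoSchedule
      key: node.kubernetes.io/role
      value: infra
```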
func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/rules/onpremises-kfd-v1alpha2.yaml b/rules/onpremises-kfd-v1alpha2.yaml index 7afc29d61..dedf65668 100644 --- a/rules/onpremises-kfd-v1alpha2.yaml +++ b/rules/onpremises-kfd-v1alpha2.yaml @@ -13,6 +13,14 @@ kubernetes: - path: .spec.kubernetes.svcCidr immutable: true distribution: + - path: .spec.distribution.common.networkPoliciesEnabled + immutable: false + description: "changes to the network policies have been detected. This will cause the reconfiguration or deletion of the current network policies." + safe: + - to: none + reducers: + - key: distributionCommonNetworkPoliciesEnabled + lifecycle: pre-apply - path: .spec.distribution.modules.networking.type immutable: true - path: .spec.distribution.modules.logging.type diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index c4235bcd8..0b82f017a 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1,4 +1,32 @@ { + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "pattern": "^kfd\\.sighup\\.io/v\\d+((alpha|beta)\\d+)?$" + }, + "kind": { + "type": "string", + "enum": [ + "EKSCluster" + ] + }, + "metadata": { + "$ref": "#/$defs/Metadata" + }, + "spec": { + "$ref": "#/$defs/Spec" + } + }, + "additionalProperties": false, + "required": [ + "apiVersion", + "kind", + "metadata", + "spec" + ], "$defs": { "Metadata": { "type": "object", @@ -6,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -20,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." 
}, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -100,251 +132,148 @@ } } }, - "Spec.Distribution": { + "Spec.ToolsConfiguration": { "type": "object", "additionalProperties": false, "properties": { - "common": { - "$ref": "#/$defs/Spec.Distribution.Common" - }, - "modules": { - "$ref": "#/$defs/Spec.Distribution.Modules" - }, - "customPatches": { - "$ref": "../public/spec-distribution-custompatches.json" + "terraform": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform" } }, "required": [ - "modules" - ], - "if": { - "allOf": [ - { - "required": [ - "common" - ] - }, - { - "properties": { - "common": { - "required": [ - "provider" - ] - } - } - }, - { - "properties": { - "common": { - "properties": { - "provider": { - "required": [ - "type" - ] - } - } - } - } - }, - { - "properties": { - "common": { - "properties": { - "provider": { - "properties": { - "type": { - "const": "eks" - } - } - } - } - } - } - } - ] - }, - "then": { - "properties": { - "modules": { - "required": [ - "aws" - ] - } - } - }, - "else": { - "properties": { - "modules": { - "properties": { - "aws": { - "type": "null" - } - } - } - } - } + "terraform" + ] }, - "Spec.Distribution.Common": { + "Spec.ToolsConfiguration.Terraform": { "type": "object", "additionalProperties": false, "properties": { - "nodeSelector": { - "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" - }, - "tolerations": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.KubeToleration" - }, - "description": "The tolerations that will be added to the pods for all the KFD modules" - }, - "provider": { - "$ref": "#/$defs/Spec.Distribution.Common.Provider" - }, - "relativeVendorPath": { - "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" - }, - "registry": { - "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "state": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State" } - } + }, + "required": [ + "state" + ] }, - "Spec.Distribution.Common.Provider": { + "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { - "type": { - "type": "string", - "description": "The type of the provider, must be EKS if specified" + "s3": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" } }, "required": [ - "type" + "s3" ] }, - "Spec.Distribution.Modules": { + "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { - "auth": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth" - }, - "aws": { - "$ref": "#/$defs/Spec.Distribution.Modules.Aws" - }, - "dr": { - "$ref": "#/$defs/Spec.Distribution.Modules.Dr" - }, - "ingress": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress" - }, - "logging": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging" - }, - "monitoring": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring" + "bucketName": { + "$ref": "#/$defs/Types.AwsS3BucketName", + "description": "This value defines which bucket will be used to store all the states." 
}, - "tracing": { - "$ref": "#/$defs/Spec.Distribution.Modules.Tracing" + "keyPrefix": { + "$ref": "#/$defs/Types.AwsS3KeyPrefix", + "description": "This value defines which folder will be used to store all the states inside the bucket." }, - "networking": { - "$ref": "#/$defs/Spec.Distribution.Modules.Networking" + "region": { + "$ref": "#/$defs/Types.AwsRegion", + "description": "This value defines in which region the bucket is located." }, - "policy": { - "$ref": "#/$defs/Spec.Distribution.Modules.Policy" + "skipRegionValidation": { + "type": "boolean", + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." } }, "required": [ - "dr", - "ingress", - "logging", - "policy" + "bucketName", + "keyPrefix", + "region" ] }, - "Spec.Distribution.Modules.Auth": { + "Spec.Infrastructure": { "type": "object", "additionalProperties": false, "properties": { - "overrides": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" - }, - "provider": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider" - }, - "baseDomain": { - "type": "string", - "description": "The base domain for the auth module" - }, - "pomerium": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" + "vpc": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc" }, - "dex": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Dex" + "vpn": { + "$ref": "#/$defs/Spec.Infrastructure.Vpn" } }, - "required": [ - "provider" - ], "allOf": [ { "if": { - "properties": { - "provider": { + "allOf": [ + { "properties": { - "type": { - "const": "sso" + "vpc": { + "type": "null" + } + } + }, + { + "not": { + "properties": { + "vpn": { + "type": "null" + } } } } - } - }, - "then": { - "required": [ - "dex", - "pomerium", - "baseDomain" ] }, - "else": { + "then": { "properties": { - "dex": { - "type": "null" - }, - "pomerium": { - "type": "null" + "vpn": { + "required": [ + "vpcId" + ] } } } }, { "if": { - "properties": { - "provider": { - "properties": { - "type": { - "const": "basicAuth" + "allOf": [ + { + "not": { + "properties": { + "vpc": { + "type": "null" + } + } + } + }, + { + "not": { + "properties": { + "vpn": { + "properties": { + "vpcId": { + "type": "null" + } + } + } } } } - } - }, - "then": { - "properties": { - "provider": { - "required": [ - "basicAuth" - ] - } - } + ] }, - "else": { + "then": { "properties": { - "provider": { - "basicAuth": { - "type": "null" + "vpn": { + "properties": { + "vpcId": { + "type": "null" + } } } } @@ -352,816 +281,1102 @@ } ] }, - "Spec.Distribution.Modules.Auth.Dex": { + "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.", "additionalProperties": false, "properties": { - "connectors": { - "type": "array", - "description": "The connectors for dex" - }, - "additionalStaticClients": { - "type": "array", - "description": "The additional static clients for dex" - }, - "expiry": { - "type": "object", - "additionalProperties": false, - "properties": { - "signingKeys": { - "type": "string", - "description": "Dex signing key expiration time duration (default 6h)." - }, - "idTokens": { - "type": "string", - "description": "Dex ID tokens expiration time duration (default 24h)." 
- } - } - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "network": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network" } }, "required": [ - "connectors" + "network" ] }, - "Spec.Distribution.Modules.Auth.Overrides": { - "type": "object", - "additionalProperties": false, - "properties": { - "nodeSelector": { - "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" - }, - "tolerations": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/$defs/Types.KubeToleration" - }, - "description": "The tolerations that will be added to the pods for the auth module" - }, - "ingresses": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" - } - } - } - }, - "Spec.Distribution.Modules.Auth.Overrides.Ingress": { + "Spec.Infrastructure.Vpc.Network": { "type": "object", "additionalProperties": false, "properties": { - "host": { - "type": "string", - "description": "The host of the ingress" + "cidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "The network CIDR for the VPC that will be created" }, - "ingressClass": { - "type": "string", - "description": "The ingress class of the ingress" + "subnetsCidrs": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" } }, "required": [ - "host", - "ingressClass" + "cidr", + "subnetsCidrs" ] }, - "Spec.Distribution.Modules.Auth.Pomerium": { - "$ref": "../public/spec-distribution-modules-auth-pomerium.json" - }, - "Spec.Distribution.Modules.Auth.Provider": { + "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { "type": "object", + "description": "Network CIDRS configuration for private and public subnets.", "additionalProperties": false, "properties": { - "type": { - "type": "string", - "enum": [ - "none", - "basicAuth", - "sso" - ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "private": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, - "basicAuth": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" + "public": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" } }, "required": [ - "type" + "private", + "public" ] }, - "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { + "Spec.Infrastructure.Vpn": { "type": "object", + "description": "Configuration for the VPN server instances.", "additionalProperties": false, "properties": { - "username": { + "instances": { + "type": "integer", + "description": "The number of VPN server instances to create, `0` to skip the creation." + }, + "port": { + "$ref": "#/$defs/Types.TcpPort", + "description": "The port where each OpenVPN server will listen for connections." + }, + "instanceType": { "type": "string", - "description": "The username for the basic auth" + "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`." }, - "password": { + "diskSize": { + "type": "integer", + "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB." 
+      },
+      "operatorName": {
         "type": "string",
-        "description": "The password for the basic auth"
-      }
-    },
-    "required": [
-      "username",
-      "password"
-    ]
-  },
-  "Spec.Distribution.Modules.Aws": {
-    "type": "object",
-    "additionalProperties": false,
-    "properties": {
-      "clusterAutoscaler": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Aws.ClusterAutoscaler"
+        "description": "The username of the account to create in the bastion's operating system."
       },
-      "ebsCsiDriver": {
-        "type": "object",
-        "additionalProperties": false,
-        "properties": {
-          "iamRoleArn": {
-            "$ref": "#/$defs/Types.AwsArn"
-          },
-          "overrides": {
-            "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
-          }
-        },
-        "required": [
-          "iamRoleArn"
-        ]
+      "dhParamsBits": {
+        "type": "integer",
+        "description": "The size in bits of the Diffie-Hellman parameters `.pem` file that will be created and referenced in the `dh` option of the OpenVPN `server.conf` file."
       },
-      "loadBalancerController": {
-        "type": "object",
-        "additionalProperties": false,
-        "properties": {
-          "iamRoleArn": {
-            "$ref": "#/$defs/Types.AwsArn"
-          },
-          "overrides": {
-            "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
-          }
-        },
-        "required": [
-          "iamRoleArn"
-        ]
+      "vpnClientsSubnetCidr": {
+        "$ref": "#/$defs/Types.Cidr",
+        "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected."
       },
-      "ebsSnapshotController": {
-        "type": "object",
-        "additionalProperties": false,
-        "properties": {
-          "overrides": {
-            "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
-          }
-        }
+      "ssh": {
+        "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh"
       },
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleOverrides"
+      "vpcId": {
+        "$ref": "#/$defs/Types.AwsVpcId",
+        "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted."
+      },
+      "bucketNamePrefix": {
+        "$ref": "#/$defs/Types.AwsS3BucketNamePrefix",
+        "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)."
+      },
+      "iamUserNameOverride": {
+        "$ref": "#/$defs/Types.AwsIamRoleName",
+        "description": "Overrides the IAM user name for the VPN. Defaults to the cluster name."
       }
     },
     "required": [
-      "clusterAutoscaler",
-      "ebsCsiDriver",
-      "loadBalancerController",
-      "overrides"
+      "ssh",
+      "vpnClientsSubnetCidr"
     ]
   },
-  "Spec.Distribution.Modules.Aws.ClusterAutoscaler": {
+  "Spec.Infrastructure.Vpn.Ssh": {
     "type": "object",
     "additionalProperties": false,
     "properties": {
-      "iamRoleArn": {
-        "$ref": "#/$defs/Types.AwsArn"
+      "publicKeys": {
+        "type": "array",
+        "items": {
+          "anyOf": [
+            {
+              "$ref": "#/$defs/Types.SshPubKey"
+            },
+            {
+              "$ref": "#/$defs/Types.FileRef"
+            }
+          ]
+        },
+        "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system."
      },
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
+      "githubUsersName": {
+        "type": "array",
+        "items": {
+          "type": "string"
+        },
+        "minItems": 1,
+        "description": "List of GitHub usernames whose SSH public keys will be fetched and added as authorized keys for the `operatorName` user."
+      },
+      "allowedFromCidrs": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Types.Cidr"
+        },
+        "description": "The network CIDRs enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source."
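A minimal sketch of the VPN block defined above (instance type, GitHub handle, and CIDRs are placeholder values):

```yaml
spec:
  infrastructure:
    vpn:
      instances: 1
      port: 1194
      instanceType: t3.micro
      diskSize: 50
      operatorName: sighup
      dhParamsBits: 2048
      vpnClientsSubnetCidr: 172.31.0.0/16
      ssh:
        githubUsersName:
          - example-gh-user # placeholder GitHub handle
        allowedFromCidrs:
          - 0.0.0.0/0 # allows any source; restrict this in production
```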
      }
     },
     "required": [
-      "iamRoleArn"
+      "allowedFromCidrs",
+      "githubUsersName"
     ]
   },
-  "Spec.Distribution.Modules.Dr": {
+  "Spec.Kubernetes": {
     "type": "object",
+    "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.",
     "additionalProperties": false,
     "properties": {
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleOverrides"
+      "vpcId": {
+        "$ref": "#/$defs/Types.AwsVpcId",
+        "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created."
       },
-      "type": {
-        "type": "string",
-        "enum": [
-          "none",
-          "eks"
-        ],
-        "description": "The type of the DR, must be ***none*** or ***eks***"
+      "clusterIAMRoleNamePrefixOverride": {
+        "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
+        "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name."
       },
-      "velero": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero"
-      }
-    },
-    "required": [
-      "type"
-    ],
-    "if": {
-      "properties": {
-        "type": {
-          "const": "eks"
+      "workersIAMRoleNamePrefixOverride": {
+        "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
+        "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name."
+      },
+      "subnetIds": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Types.AwsSubnetId"
+        },
+        "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created."
+      },
+      "apiServer": {
+        "$ref": "#/$defs/Spec.Kubernetes.APIServer"
+      },
+      "serviceIpV4Cidr": {
+        "$ref": "#/$defs/Types.Cidr",
+        "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services."
+      },
+      "nodeAllowedSshPublicKey": {
+        "anyOf": [
+          {
+            "$ref": "#/$defs/Types.AwsSshPubKey"
+          },
+          {
+            "$ref": "#/$defs/Types.FileRef"
+          }
+        ],
+        "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file."
+      },
+      "nodePoolsLaunchKind": {
+        "type": "string",
+        "enum": [
+          "launch_configurations",
+          "launch_templates",
+          "both"
+        ],
+        "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters, use `launch_templates`; for adopting an existing cluster, you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as an interim step."
+      },
+      "nodePoolGlobalAmiType": {
+        "type": "string",
+        "enum": [
+          "alinux2",
+          "alinux2023"
+        ],
+        "description": "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool."
+      },
+      "logRetentionDays": {
+        "type": "integer",
+        "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.",
+        "enum": [
+          0,
+          1,
+          3,
+          5,
+          7,
+          14,
+          30,
+          60,
+          90,
+          120,
+          150,
+          180,
+          365,
+          400,
+          545,
+          731,
+          1096,
+          1827,
+          2192,
+          2557,
+          2922,
+          3288,
+          3653
+        ]
+      },
+      "logsTypes": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "enum": [
+            "api",
+            "audit",
+            "authenticator",
+            "controllerManager",
+            "scheduler"
+          ]
+        },
+        "minItems": 0,
+        "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types."
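As a sketch, the `.spec.kubernetes` fields introduced above could be filled in like this (values are illustrative; `{file://...}` is the file-reference notation used elsewhere in this schema):

```yaml
spec:
  kubernetes:
    nodeAllowedSshPublicKey: "{file://~/.ssh/id_rsa.pub}"
    nodePoolsLaunchKind: launch_templates
    nodePoolGlobalAmiType: alinux2023
    logRetentionDays: 90
    logsTypes:
      - api
      - audit
```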
+      },
+      "nodePools": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Spec.Kubernetes.NodePool"
         }
-      }
-    },
-    "then": {
-      "required": [
-        "type",
-        "velero"
-      ]
-    }
-  },
-  "Spec.Distribution.Modules.Dr.Velero": {
-    "type": "object",
-    "additionalProperties": false,
-    "properties": {
-      "eks": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks"
       },
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+      "awsAuth": {
+        "$ref": "#/$defs/Spec.Kubernetes.AwsAuth"
       }
     },
     "required": [
-      "eks"
+      "apiServer",
+      "nodeAllowedSshPublicKey",
+      "nodePools",
+      "nodePoolsLaunchKind",
+      "nodePoolGlobalAmiType"
     ]
   },
-  "Spec.Distribution.Modules.Dr.Velero.Eks": {
+  "Spec.Kubernetes.APIServer": {
+    "type": "object",
     "additionalProperties": false,
     "properties": {
-      "bucketName": {
-        "$ref": "#/$defs/Types.AwsS3BucketName",
-        "maxLength": 49,
-        "description": "The name of the velero bucket"
+      "privateAccess": {
+        "type": "boolean",
+        "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`."
       },
-      "iamRoleArn": {
-        "$ref": "#/$defs/Types.AwsArn"
+      "privateAccessCidrs": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Types.Cidr"
+        },
+        "minItems": 0,
+        "description": "The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server."
       },
-      "region": {
-        "$ref": "#/$defs/Types.AwsRegion",
-        "description": "The region where the velero bucket is located"
+      "publicAccessCidrs": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Types.Cidr"
+        },
+        "minItems": 0,
+        "description": "The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server."
+      },
+      "publicAccess": {
+        "type": "boolean",
+        "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`."
       }
     },
     "required": [
-      "iamRoleArn",
-      "region",
-      "bucketName"
-    ],
-    "type": "object"
+      "privateAccess",
+      "publicAccess"
+    ]
   },
-  "Spec.Distribution.Modules.Ingress": {
+  "Spec.Kubernetes.NodePool": {
+    "type": "object",
     "additionalProperties": false,
-    "if": {
-      "properties": {
-        "nginx": {
-          "properties": {
-            "tls": {
-              "properties": {
-                "provider": {
-                  "const": "certManager"
-                }
-              }
-            }
-          }
-        }
-      }
-    },
+    "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.",
     "properties": {
-      "baseDomain": {
+      "type": {
+        "description": "The type of Node Pool: `self-managed` allows customizations like using a custom AMI or setting the maximum number of pods per node, while `eks-managed` uses prebuilt AMIs from Amazon selected via the `ami.type` field. It is recommended to use `self-managed`.",
         "type": "string",
-        "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone"
+        "enum": [
+          "eks-managed",
+          "self-managed"
+        ]
       },
-      "certManager": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager"
+      "name": {
+        "type": "string",
+        "description": "The name of the node pool."
       },
-      "dns": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS"
+      "ami": {
+        "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami"
       },
-      "externalDns": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ExternalDNS"
+      "containerRuntime": {
+        "type": "string",
+        "enum": [
+          "docker",
+          "containerd"
+        ],
+        "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`."
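For example, a private-only API server endpoint restricted to the VPC CIDR might be declared as follows (the CIDR is a placeholder):

```yaml
spec:
  kubernetes:
    apiServer:
      privateAccess: true
      privateAccessCidrs:
        - 10.0.0.0/16
      publicAccess: false
      publicAccessCidrs: []
```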
      },
-      "forecastle": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle"
+      "size": {
+        "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size"
       },
-      "nginx": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx",
-        "description": "Configurations for the nginx ingress controller module"
+      "instance": {
+        "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance"
       },
-      "overrides": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides"
+      "attachedTargetGroups": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Types.AwsArn"
+        },
+        "description": "This optional array defines additional target groups to attach to the instances in the node pool."
+      },
+      "labels": {
+        "$ref": "#/$defs/Types.KubeLabels",
+        "description": "Kubernetes labels that will be added to the nodes."
+      },
+      "taints": {
+        "$ref": "#/$defs/Types.KubeTaints",
+        "description": "Kubernetes taints that will be added to the nodes."
+      },
+      "tags": {
+        "$ref": "#/$defs/Types.AwsTags",
+        "description": "AWS tags that will be added to the ASG and EC2 instances."
+      },
+      "subnetIds": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Types.AwsSubnetId"
+        },
+        "description": "Optional list of subnet IDs where the nodes will be created."
+      },
+      "additionalFirewallRules": {
+        "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules"
       }
     },
     "required": [
-      "certManager",
-      "externalDns",
-      "baseDomain",
-      "dns",
-      "nginx"
+      "instance",
+      "name",
+      "size",
+      "type"
     ],
-    "then": {
-      "required": [
-        "certManager"
+    "if": {
+      "allOf": [
+        {
+          "properties": {
+            "type": {
+              "enum": [
+                "eks-managed"
+              ]
+            }
+          }
+        }
       ]
     },
-    "type": "object"
+    "then": {
+      "properties": {
+        "ami": {
+          "properties": {
+            "id": {
+              "type": "null"
+            },
+            "owner": {
+              "type": "null"
+            }
+          }
+        }
+      }
+    }
   },
-  "Spec.Distribution.Modules.Ingress.CertManager": {
+  "Spec.Kubernetes.NodePool.Ami": {
     "type": "object",
     "additionalProperties": false,
+    "description": "Configuration for customizing the Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields for using a custom AMI (only with the `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.",
     "properties": {
-      "clusterIssuer": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer"
+      "id": {
+        "type": "string",
+        "description": "The ID of the AMI to use for the nodes, must be set together with the `owner` field. `ami.id` and `ami.owner` can only be set when the Node Pool type is `self-managed` and they can't be set at the same time as `ami.type`."
       },
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+      "owner": {
+        "type": "string",
+        "description": "The owner of the AMI to use for the nodes, must be set together with the `id` field. `ami.id` and `ami.owner` can only be set when the Node Pool type is `self-managed` and they can't be set at the same time as `ami.type`."
+      },
+      "type": {
+        "type": "string",
+        "description": "The AMI type defines the AMI to use for `eks-managed` and `self-managed` types of Node Pools. Only Amazon Linux based AMIs are supported. 
It can't be set at the same time as `ami.id` and `ami.owner`.",
+        "enum": [
+          "alinux2",
+          "alinux2023"
+        ]
       }
     },
-    "required": [
-      "clusterIssuer"
-    ]
-  },
-  "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": {
-    "additionalProperties": false,
     "oneOf": [
       {
-        "required": [
-          "type"
+        "allOf": [
+          {
+            "required": [
+              "id",
+              "owner"
+            ]
+          },
+          {
+            "not": {
+              "required": [
+                "type"
+              ]
+            }
+          }
         ]
       },
       {
-        "required": [
-          "solvers"
+        "allOf": [
+          {
+            "required": [
+              "type"
+            ]
+          },
+          {
+            "not": {
+              "anyOf": [
+                {
+                  "required": [
+                    "id"
+                  ]
+                },
+                {
+                  "required": [
+                    "owner"
+                  ]
+                }
+              ]
+            }
+          }
        ]
      }
-    ],
+    ]
+  },
+  "Spec.Kubernetes.NodePool.Instance": {
+    "type": "object",
+    "additionalProperties": false,
+    "description": "Configuration for the instances that will be used in the node pool.",
    "properties": {
-      "email": {
-        "type": "string",
-        "format": "email",
-        "description": "The email of the cluster issuer"
-      },
-      "name": {
+      "type": {
        "type": "string",
-        "description": "The name of the cluster issuer"
+        "description": "The instance type to use for the nodes."
      },
-      "route53": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53"
+      "spot": {
+        "type": "boolean",
+        "description": "If `true`, the nodes will be created as spot instances. Default is `false`."
      },
-      "solvers": {
-        "type": "array",
-        "description": "The custom solvers configurations"
+      "volumeSize": {
+        "type": "integer",
+        "description": "The size of the disk in GB."
      },
-      "type": {
+      "volumeType": {
        "type": "string",
+        "description": "Volume type for the instance disk. Default is `gp2`.",
        "enum": [
-          "dns01",
-          "http01"
-        ],
-        "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***"
+          "gp2",
+          "gp3",
+          "io1",
+          "standard"
+        ]
+      },
+      "maxPods": {
+        "type": "integer",
+        "description": "Set the maximum pods per node to a custom value. If not set, the EKS default value for the instance type will be used.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt"
      }
    },
    "required": [
-      "route53",
-      "name",
-      "email"
-    ],
-    "type": "object"
+      "type"
+    ]
  },
-  "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": {
+  "Spec.Kubernetes.NodePool.Size": {
    "type": "object",
    "additionalProperties": false,
    "properties": {
-      "iamRoleArn": {
-        "$ref": "#/$defs/Types.AwsArn"
-      },
-      "region": {
-        "$ref": "#/$defs/Types.AwsRegion"
+      "min": {
+        "type": "integer",
+        "minimum": 0,
+        "description": "The minimum number of nodes in the node pool."
      },
-      "hostedZoneId": {
-        "type": "string"
+      "max": {
+        "type": "integer",
+        "minimum": 0,
+        "description": "The maximum number of nodes in the node pool."
      }
    },
    "required": [
-      "hostedZoneId",
-      "iamRoleArn",
-      "region"
+      "max",
+      "min"
    ]
  },
-  "Spec.Distribution.Modules.Ingress.DNS": {
+  "Spec.Kubernetes.NodePool.AdditionalFirewallRules": {
    "type": "object",
    "additionalProperties": false,
+    "description": "Optional additional firewall rules that will be attached to the nodes.",
    "properties": {
-      "public": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public"
+      "cidrBlocks": {
+        "type": "array",
+        "items": {
+          "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock"
+        },
+        "minItems": 1,
+        "maxItems": 1,
+        "description": "The CIDR block object definitions for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details."
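Putting the node pool definitions together, a minimal `self-managed` pool that satisfies the `oneOf` AMI constraint above might look like this sketch (pool name, instance type, and sizes are placeholders):

```yaml
spec:
  kubernetes:
    nodePools:
      - name: workers
        type: self-managed
        ami:
          type: alinux2023 # mutually exclusive with ami.id/ami.owner
        instance:
          type: t3.xlarge
          spot: false
          volumeSize: 50
          volumeType: gp3
        size:
          min: 1
          max: 3
```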
}, - "private": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private" + "sourceSecurityGroupId": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId" + }, + "minItems": 1 }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "self": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self" + }, + "minItems": 1 + } + } + }, + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", + "enum": [ + "ingress", + "egress" + ] + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." + }, + "cidrBlocks": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "minItems": 1 + }, + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol" + }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" } }, "required": [ - "public", - "private" + "cidrBlocks", + "name", + "ports", + "protocol", + "type" ] }, - "Spec.Distribution.Modules.Ingress.DNS.Private": { + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId": { + "type": "object", "additionalProperties": false, "properties": { - "create": { - "type": "boolean", - "description": "If true, the private hosted zone will be created" - }, "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name for the additional Firewall rule Security Group." }, - "vpcId": { - "type": "string" + "type": { + "type": "string", + "enum": [ + "ingress", + "egress" + ], + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." + }, + "sourceSecurityGroupId": { + "type": "string", + "description": "The source security group ID." + }, + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol", + "description": "The protocol of the Firewall rule." + }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" } }, "required": [ - "vpcId", + "sourceSecurityGroupId", "name", - "create" - ], - "type": "object" + "ports", + "protocol", + "type" + ] }, - "Spec.Distribution.Modules.Ingress.DNS.Public": { + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self": { "type": "object", "additionalProperties": false, "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the Firewall rule." }, - "create": { + "type": { + "type": "string", + "enum": [ + "ingress", + "egress" + ], + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." + }, + "self": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "If `true`, the source will be the security group itself." + }, + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol", + "description": "The protocol of the Firewall rule." 
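An illustrative `additionalFirewallRules` entry for a node pool, matching the single-item `cidrBlocks` constraint above (rule name, CIDR, and port are placeholders; `tcp` is assumed to be an accepted value for `Types.AwsIpProtocol`):

```yaml
# Fragment of a single spec.kubernetes.nodePools[] item.
additionalFirewallRules:
  cidrBlocks:
    - name: allow-node-exporter-from-vpc
      type: ingress
      cidrBlocks:
        - 10.0.0.0/16
      protocol: tcp
      ports:
        from: 9100
        to: 9100
```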
+ }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" } }, "required": [ + "self", "name", - "create" + "ports", + "protocol", + "type" ] }, - "Spec.Distribution.Modules.Ingress.ExternalDNS": { + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { - "privateIamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "from": { + "$ref": "#/$defs/Types.TcpPort" }, - "publicIamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "to": { + "$ref": "#/$defs/Types.TcpPort" } }, "required": [ - "privateIamRoleArn", - "publicIamRoleArn" + "from", + "to" ] }, - "Spec.Distribution.Modules.Ingress.Forecastle": { + "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "additionalAccounts": { + "type": "array", + "items": { + "type": "string" + }, + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." + }, + "users": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" + }, + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." + }, + "roles": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" + }, + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." } } }, - "Spec.Distribution.Modules.Ingress.Nginx": { + "Spec.Kubernetes.AwsAuth.Role": { "type": "object", "additionalProperties": false, "properties": { - "type": { - "type": "string", - "enum": [ - "none", - "single", - "dual" - ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "username": { + "type": "string" }, - "tls": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" + "groups": { + "type": "array", + "items": { + "type": "string" + } }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "rolearn": { + "$ref": "#/$defs/Types.AwsArn" } }, "required": [ - "type" + "groups", + "rolearn", + "username" ] }, - "Spec.Distribution.Modules.Ingress.Nginx.TLS": { + "Spec.Kubernetes.AwsAuth.User": { "type": "object", "additionalProperties": false, "properties": { - "provider": { - "type": "string", - "enum": [ - "certManager", - "secret", - "none" - ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "username": { + "type": "string" }, - "secret": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" - } - }, - "required": [ - "provider" - ], - "if": { - "properties": { - "provider": { - "const": "secret" + "groups": { + "type": "array", + "items": { + "type": "string" } + }, + "userarn": { + "$ref": "#/$defs/Types.AwsArn" } }, - "then": { - "required": [ - "secret" - ] - } + "required": [ + "groups", + "userarn", + "username" + ] }, - "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { + "Spec.Distribution": { "type": "object", "additionalProperties": false, "properties": { - "cert": { - "type": "string", - "description": "The certificate file content or you can use the file notation to get the 
content from a file" + "common": { + "$ref": "#/$defs/Spec.Distribution.Common" }, - "key": { - "type": "string" + "modules": { + "$ref": "#/$defs/Spec.Distribution.Modules" }, - "ca": { - "type": "string" + "customPatches": { + "$ref": "../public/spec-distribution-custompatches.json" } }, "required": [ - "ca", - "cert", - "key" - ] + "modules" + ], + "if": { + "allOf": [ + { + "required": [ + "common" + ] + }, + { + "properties": { + "common": { + "required": [ + "provider" + ] + } + } + }, + { + "properties": { + "common": { + "properties": { + "provider": { + "required": [ + "type" + ] + } + } + } + } + }, + { + "properties": { + "common": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "eks" + } + } + } + } + } + } + } + ] + }, + "then": { + "properties": { + "modules": { + "required": [ + "aws" + ] + } + } + }, + "else": { + "properties": { + "modules": { + "properties": { + "aws": { + "type": "null" + } + } + } + } + } }, - "Spec.Distribution.Modules.Ingress.Overrides": { + "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { - "ingresses": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" - }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" + }, + "provider": { + "$ref": "#/$defs/Spec.Distribution.Common.Provider" + }, + "relativeVendorPath": { + "type": "string", + "description": "The relative path to the vendor directory, does not need to be changed." + }, + "registry": { + "type": "string", + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, - "Spec.Distribution.Modules.Ingress.Overrides.Ingresses": { + "Spec.Distribution.Common.Provider": { "type": "object", "additionalProperties": false, "properties": { - "forecastle": { - "$ref": "#/$defs/Types.FuryModuleOverridesIngress" + "type": { + "type": "string", + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} - } + }, + "required": [ + "type" + ] }, - "Spec.Distribution.Modules.Logging": { + "Spec.Distribution.Modules": { + "type": "object", + "additionalProperties": false, + "properties": { + "auth": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth" + }, + "aws": { + "$ref": "#/$defs/Spec.Distribution.Modules.Aws" + }, + "dr": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr" + }, + "ingress": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress" + }, + "logging": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging" + }, + "monitoring": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring" + }, + "tracing": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing" + }, + "networking": { + "$ref": "#/$defs/Spec.Distribution.Modules.Networking" + }, + "policy": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy" + } + }, + "required": [ + "dr", + "ingress", + "logging", + "policy" + ] + }, + "Spec.Distribution.Modules.Ingress": { "type": "object", "additionalProperties": false, "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides" }, - "type": { + "baseDomain": { "type": "string", - "enum": [ - "none", - "opensearch", - "loki", - "customOutputs" - ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, - "opensearch": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" - }, - "loki": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Loki" + "nginx": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", + "description": "Configurations for the Ingress nginx controller package." }, - "cerebro": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Cerebro" + "certManager": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." 
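A minimal sketch of the Ingress module configuration these fields describe (domain names are placeholders):

```yaml
spec:
  distribution:
    modules:
      ingress:
        baseDomain: internal.example.com
        nginx:
          type: dual
          tls:
            provider: certManager
```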
}, - "minio": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Minio" + "dns": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" }, - "operator": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Operator" + "forecastle": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" }, - "customOutputs": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.CustomOutputs" + "externalDns": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ExternalDNS" } }, "required": [ - "type" + "certManager", + "externalDns", + "baseDomain", + "nginx" ], "allOf": [ { "if": { "properties": { - "type": { - "const": "opensearch" + "nginx": { + "properties": { + "type": { + "const": "dual" + } + } } } }, "then": { "required": [ - "opensearch" - ] + "dns" + ], + "properties": { + "dns": { + "required": [ + "public", + "private" + ] + } + } } }, { "if": { "properties": { - "type": { - "const": "customOutputs" + "nginx": { + "properties": { + "type": { + "const": "single" + } + } } } }, "then": { "required": [ - "customOutputs" - ] + "dns" + ], + "properties": { + "dns": { + "required": [ + "public" + ] + } + } + } + }, + { + "if": { + "properties": { + "nginx": { + "properties": { + "tls": { + "properties": { + "provider": { + "const": "certManager" + } + } + } + } + } + } + }, + "then": { + "required": [ + "certManager" + ] } } - ] - }, - "Spec.Distribution.Modules.Logging.Cerebro": { - "type": "object", - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } + ] }, - "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { - "audit": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "events": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "infra": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "ingressNginx": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "kubernetes": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the nullout output to discard the flow." - }, - "systemdCommon": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "ingresses": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, - "systemdEtcd": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "Set to override the node selector used to place the pods of the Ingress module." }, - "errors": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "tolerations": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } - }, - "required": [ - "audit", - "events", - "infra", - "ingressNginx", - "kubernetes", - "systemdCommon", - "systemdEtcd", - "errors" - ] + } }, - "Spec.Distribution.Modules.Logging.Loki": { + "Spec.Distribution.Modules.Ingress.Overrides.Ingresses": { "type": "object", "additionalProperties": false, "properties": { - "backend": { - "type": "string", - "enum": [ - "minio", - "externalEndpoint" - ] - }, - "externalEndpoint": { - "type": "object", - "additionalProperties": false, - "properties": { - "endpoint": { - "type": "string", - "description": "The endpoint of the loki external endpoint" - }, - "insecure": { - "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" - }, - "secretAccessKey": { - "type": "string", - "description": "The secret access key of the loki external endpoint" - }, - "accessKeyId": { - "type": "string", - "description": "The access key id of the loki external endpoint" - }, - "bucketName": { - "type": "string", - "description": "The bucket name of the loki external endpoint" - } - } - }, - "resources": { - "$ref": "#/$defs/Types.KubeResources" + "forecastle": { + "$ref": "#/$defs/Types.FuryModuleOverridesIngress" } } }, - "Spec.Distribution.Modules.Logging.Minio": { + "Spec.Distribution.Modules.Ingress.Forecastle": { "type": "object", "additionalProperties": false, "properties": { - "storageSize": { - "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" - }, - "rootUser": { - "type": "object", - "additionalProperties": false, - "properties": { - "username": { - "type": "string", - "description": "The username of the minio root user" - }, - "password": { - "type": "string", - "description": "The password of the minio root user" - } - } - }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } } }, - "Spec.Distribution.Modules.Logging.Opensearch": { + "Spec.Distribution.Modules.Ingress.Nginx": { "type": "object", "additionalProperties": false, "properties": { "type": { "type": "string", "enum": [ + "none", "single", - "triple" + "dual" ], - "description": "The type of the opensearch, must be ***single*** or 
***triple***" - }, - "resources": { - "$ref": "#/$defs/Types.KubeResources" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, - "storageSize": { - "type": "string", - "description": "The storage size for the opensearch pods" + "tls": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1171,275 +1386,188 @@ "type" ] }, - "Spec.Distribution.Modules.Logging.Operator": { - "type": "object", - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Distribution.Modules.Monitoring": { + "Spec.Distribution.Modules.Ingress.Nginx.TLS": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", "properties": { - "type": { + "provider": { "type": "string", "enum": [ - "none", - "prometheus", - "prometheusAgent", - "mimir" + "certManager", + "secret", + "none" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" - }, - "prometheus": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Prometheus" - }, - "prometheusAgent": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.PrometheusAgent" - }, - "alertmanager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.AlertManager" - }, - "grafana": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Grafana" - }, - "blackboxExporter": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.BlackboxExporter" - }, - "kubeStateMetrics": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.KubeStateMetrics" - }, - "x509Exporter": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.X509Exporter" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." 
}, - "mimir": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Mimir" - }, - "minio": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Minio" + "secret": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" } }, "required": [ - "type" - ] - }, - "Spec.Distribution.Modules.Monitoring.AlertManager": { - "type": "object", - "additionalProperties": false, - "properties": { - "deadManSwitchWebhookUrl": { - "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" - }, - "installDefaultRules": { - "type": "boolean", - "description": "If true, the default rules will be installed" - }, - "slackWebhookUrl": { - "type": "string", - "description": "The slack webhook url to send alerts" - } - } - }, - "Spec.Distribution.Modules.Monitoring.BlackboxExporter": { - "type": "object", - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "provider" + ], + "if": { + "properties": { + "provider": { + "const": "secret" + } } + }, + "then": { + "required": [ + "secret" + ] } }, - "Spec.Distribution.Modules.Monitoring.Grafana": { + "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { - "usersRoleAttributePath": { + "cert": { "type": "string", - "description": "[JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's role. Example:\n\n```yaml\nusersRoleAttributePath: \"contains(groups[*], 'beta') && 'Admin' || contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && 'Viewer'\n```\n\nMore details in [Grafana's documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping)." + "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, - "basicAuthIngress": { - "type": "boolean", - "description": "Setting this to true will deploy an additional `grafana-basic-auth` ingress protected with Grafana's basic auth instead of SSO. It's intended use is as a temporary ingress for when there are problems with the SSO login flow.\n\nNotice that by default anonymous access is enabled." + "key": { + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "ca": { + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } - } + }, + "required": [ + "ca", + "cert", + "key" + ] }, - "Spec.Distribution.Modules.Monitoring.KubeStateMetrics": { + "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. 
Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { + "clusterIssuer": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" + }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - } + }, + "required": [ + "clusterIssuer" + ] }, - "Spec.Distribution.Modules.Monitoring.Mimir": { + "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { - "retentionTime": { + "name": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The name of the clusterIssuer." }, - "backend": { + "email": { + "type": "string", + "format": "email", + "description": "The email address to use during the certificate issuing process." + }, + "type": { "type": "string", "enum": [ - "minio", - "externalEndpoint" + "dns01", + "http01" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." }, - "externalEndpoint": { - "type": "object", - "additionalProperties": false, - "properties": { - "endpoint": { - "type": "string", - "description": "The endpoint of the external mimir backend" - }, - "insecure": { - "type": "boolean", - "description": "If true, the external mimir backend will not use tls" - }, - "secretAccessKey": { - "type": "string", - "description": "The secret access key of the external mimir backend" - }, - "accessKeyId": { - "type": "string", - "description": "The access key id of the external mimir backend" - }, - "bucketName": { - "type": "string", - "description": "The bucket name of the external mimir backend" - } - } + "solvers": { + "type": "array", + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." 
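An illustrative `clusterIssuer` block; note that this schema also lists `route53` as required, even when using the `http01` challenge type (email, hosted zone ID, and role ARN are placeholders):

```yaml
# Fragment of spec.distribution.modules.ingress.
ingress:
  certManager:
    clusterIssuer:
      name: letsencrypt-fury
      email: platform-team@example.com
      type: http01
      route53:
        iamRoleArn: arn:aws:iam::123456789012:role/example-cert-manager
        region: eu-west-1
        hostedZoneId: Z0123456789EXAMPLE
```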
}, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "route53": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" } - } + }, + "required": [ + "route53", + "name", + "email" + ], + "oneOf": [ + { + "required": [ + "type" + ] + }, + { + "required": [ + "solvers" + ] + } + ] }, - "Spec.Distribution.Modules.Monitoring.Minio": { + "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { - "storageSize": { - "type": "string", - "description": "The storage size for the minio pods" + "public": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public" }, - "rootUser": { - "type": "object", - "additionalProperties": false, - "properties": { - "username": { - "type": "string", - "description": "The username for the minio root user" - }, - "password": { - "type": "string", - "description": "The password for the minio root user" - } - } + "private": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private" }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } } }, - "Spec.Distribution.Modules.Monitoring.Prometheus": { + "Spec.Distribution.Modules.Ingress.DNS.Public": { "type": "object", "additionalProperties": false, "properties": { - "resources": { - "$ref": "#/$defs/Types.KubeResources" - }, - "retentionTime": { - "type": "string", - "description": "The retention time for the k8s Prometheus instance." - }, - "retentionSize": { - "type": "string", - "description": "The retention size for the k8s Prometheus instance." - }, - "storageSize": { + "name": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." - }, - "remoteWrite": { - "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", - "type": "array", - "items": { - "type": "object" - } - } - } - }, - "Spec.Distribution.Modules.Monitoring.PrometheusAgent": { - "type": "object", - "additionalProperties": false, - "properties": { - "resources": { - "$ref": "#/$defs/Types.KubeResources" + "description": "The name of the public hosted zone." }, - "remoteWrite": { - "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", - "type": "array", - "items": { - "type": "object" - } + "create": { + "type": "boolean", + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." 
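A sketch of the `dns` block for the `dual` ingress setup, using an existing public zone and creating the private one (zone names and VPC ID are placeholders):

```yaml
# Fragment of spec.distribution.modules.ingress.
ingress:
  dns:
    public:
      name: example.com
      create: false
    private:
      name: internal.example.com
      create: true
      vpcId: vpc-0123456789abcdef0
```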
      }
     },
     "required": [
+      "name",
+      "create"
+    ]
   },
-  "Spec.Distribution.Modules.Monitoring.X509Exporter": {
+  "Spec.Distribution.Modules.Ingress.DNS.Private": {
     "type": "object",
+    "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.",
     "additionalProperties": false,
     "properties": {
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
-      }
-    }
-  },
-  "Spec.Distribution.Modules.Networking": {
-    "additionalProperties": false,
-    "properties": {
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+      "name": {
+        "type": "string",
+        "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`."
       },
-      "tigeraOperator": {
-        "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator"
+      "create": {
+        "type": "boolean",
+        "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead."
       },
-      "type": {
-        "type": "string",
-        "enum": [
-          "none"
-        ]
+      "vpcId": {
+        "type": "string"
       }
     },
-    "type": "object"
-  },
-  "Spec.Distribution.Modules.Networking.TigeraOperator": {
-    "type": "object",
-    "additionalProperties": false,
-    "properties": {
-      "overrides": {
-        "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
-      }
-    }
+    "required": [
+      "vpcId",
+      "name",
+      "create"
+    ]
   },
-  "Spec.Distribution.Modules.Policy": {
+  "Spec.Distribution.Modules.Logging": {
     "type": "object",
     "additionalProperties": false,
+    "description": "Configuration for the Logging module.",
     "properties": {
       "overrides": {
         "$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1448,16 +1576,29 @@
         "type": "string",
         "enum": [
           "none",
-          "gatekeeper",
-          "kyverno"
+          "opensearch",
+          "loki",
+          "customOutputs"
         ],
-        "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***"
+        "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage; you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
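For instance, selecting the OpenSearch stack in its HA flavor could look like this sketch (the storage size echoes the default stated further below):

```yaml
spec:
  distribution:
    modules:
      logging:
        type: opensearch
        opensearch:
          type: triple
          storageSize: 150Gi
```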
}, - "gatekeeper": { - "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" + "opensearch": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" }, - "kyverno": { - "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Kyverno" + "loki": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Loki" + }, + "cerebro": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Cerebro" + }, + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Minio" + }, + "operator": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Operator" + }, + "customOutputs": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.CustomOutputs" } }, "required": [ @@ -1468,13 +1609,13 @@ "if": { "properties": { "type": { - "const": "gatekeeper" + "const": "opensearch" } } }, "then": { "required": [ - "gatekeeper" + "opensearch" ] } }, @@ -1482,116 +1623,77 @@ "if": { "properties": { "type": { - "const": "kyverno" + "const": "loki" } } }, "then": { "required": [ - "kyverno" + "loki" + ] + } + }, + { + "if": { + "properties": { + "type": { + "const": "customOutputs" + } + } + }, + "then": { + "required": [ + "customOutputs" ] } } ] }, - "Spec.Distribution.Modules.Policy.Gatekeeper": { + "Spec.Distribution.Modules.Logging.Opensearch": { "type": "object", "additionalProperties": false, "properties": { - "additionalExcludedNamespaces": { - "type": "array", - "items": { - "type": "string" - }, - "description": "This parameter adds namespaces to Gatekeeper's exemption list, so it will not enforce the constraints on them." - }, - "enforcementAction": { + "type": { "type": "string", "enum": [ - "deny", - "dryrun", - "warn" + "single", + "triple" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, - "installDefaultPolicies": { - "type": "boolean", - "description": "If true, the default policies will be installed" + "resources": { + "$ref": "#/$defs/Types.KubeResources" + }, + "storageSize": { + "type": "string", + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "enforcementAction", - "installDefaultPolicies" + "type" ] }, - "Spec.Distribution.Modules.Policy.Kyverno": { + "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { - "additionalExcludedNamespaces": { - "type": "array", - "items": { - "type": "string" - }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." 
- }, - "validationFailureAction": { - "type": "string", - "enum": [ - "audit", - "enforce" - ], - "description": "The validation failure action to use for the kyverno module" - }, - "installDefaultPolicies": { - "type": "boolean", - "description": "If true, the default policies will be installed" - }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - }, - "required": [ - "validationFailureAction", - "installDefaultPolicies" - ] + } }, - "Spec.Distribution.Modules.Tracing": { + "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" - }, - "type": { + "storageSize": { "type": "string", - "enum": [ - "none", - "tempo" - ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" - }, - "tempo": { - "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" - }, - "minio": { - "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Minio" - } - }, - "required": [ - "type" - ] - }, - "Spec.Distribution.Modules.Tracing.Minio": { - "type": "object", - "additionalProperties": false, - "properties": { - "storageSize": { - "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1599,11 +1701,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1612,858 +1714,1041 @@ } } }, - "Spec.Distribution.Modules.Tracing.Tempo": { + "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { - "retentionTime": { - "type": "string", - "description": "The retention time for the tempo pods" - }, "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" - ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + ] }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." 
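A sketch of Loki pointed at an external S3-compatible endpoint instead of the in-cluster MinIO (endpoint, bucket, and credentials are placeholders; `tsdbStartDate` is the field required further below):

```yaml
spec:
  distribution:
    modules:
      logging:
        type: loki
        loki:
          backend: externalEndpoint
          externalEndpoint:
            endpoint: s3.example.com
            insecure: false
            bucketName: kfd-loki-logs
            accessKeyId: PLACEHOLDER_ACCESS_KEY
            secretAccessKey: PLACEHOLDER_SECRET_KEY
          tsdbStartDate: "2024-11-18"
```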
}, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Infrastructure": { - "type": "object", - "additionalProperties": false, - "properties": { - "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" - }, - "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" - } - }, - "allOf": [ - { - "if": { - "allOf": [ - { - "properties": { - "vpc": { - "type": "null" - } - } - }, - { - "not": { - "properties": { - "vpn": { - "type": "null" - } - } - } - } - ] - }, - "then": { - "properties": { - "vpn": { - "required": [ - "vpcId" - ] - } - } - } + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." }, - { - "if": { - "allOf": [ - { - "not": { - "properties": { - "vpc": { - "type": "null" - } - } - } - }, - { - "not": { - "properties": { - "vpn": { - "properties": { - "vpcId": { - "type": "null" - } - } - } - } - } - } - ] - }, - "then": { - "properties": { - "vpn": { - "properties": { - "vpcId": { - "type": "null" - } - } - } - } - } - } - ] - }, - "Spec.Infrastructure.Vpc": { - "type": "object", - "additionalProperties": false, - "properties": { - "network": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network" + "resources": { + "$ref": "#/$defs/Types.KubeResources" } }, "required": [ - "network" + "tsdbStartDate" ] }, - "Spec.Infrastructure.Vpc.Network": { + "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { - "cidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "This is the CIDR of the VPC that will be created" - }, - "subnetsCidrs": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - }, - "required": [ - "cidr", - "subnetsCidrs" - ] + } }, - "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { + "Spec.Distribution.Modules.Logging.CustomOutputs": { + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { - "private": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "audit": { + "type": "string", + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, - "public": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "events": { + "type": "string", + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" + }, + "infra": { + "type": "string", + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" + }, + "ingressNginx": { + "type": "string", + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" + }, + "kubernetes": { + "type": "string", + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" + }, + "systemdCommon": { + "type": "string", + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" + }, + "systemdEtcd": { + "type": "string", + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" + }, + "errors": { + "type": "string", + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ - "private", - "public" + "audit", + "events", + "infra", + "ingressNginx", + "kubernetes", + "systemdCommon", + "systemdEtcd", + "errors" ] }, - "Spec.Infrastructure.Vpn": { + "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Monitoring module.", "properties": { - "instances": { - "type": "integer", - "description": "The number of instances to create, 0 to skip the creation" + "type": { + "type": "string", + "enum": [ + "none", + "prometheus", + "prometheusAgent", + "mimir" + ], + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, - "port": { - "$ref": "#/$defs/Types.TcpPort", - "description": "The port used by the OpenVPN server" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" }, - "instanceType": { - "type": "string", - "description": "The size of the AWS EC2 instance" + "prometheus": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Prometheus" }, - "diskSize": { - "type": "integer", - "description": "The size of the disk in GB" + "prometheusAgent": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.PrometheusAgent" }, - "operatorName": { - "type": "string", - "description": "The username of the account to create in the bastion's operating system" + "alertmanager": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.AlertManager" }, - "dhParamsBits": { - "type": "integer", - "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + "grafana": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Grafana" }, - "vpnClientsSubnetCidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + "blackboxExporter": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.BlackboxExporter" }, - "ssh": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" + "kubeStateMetrics": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.KubeStateMetrics" }, - "vpcId": { - "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "x509Exporter": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.X509Exporter" }, - "bucketNamePrefix": { - "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "mimir": { + "$ref": 
"#/$defs/Spec.Distribution.Modules.Monitoring.Mimir" }, - "iamUserNameOverride": { - "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Minio" } }, "required": [ - "ssh", - "vpnClientsSubnetCidr" + "type" ] }, - "Spec.Infrastructure.Vpn.Ssh": { + "Spec.Distribution.Modules.Monitoring.Prometheus": { "type": "object", "additionalProperties": false, "properties": { - "publicKeys": { + "resources": { + "$ref": "#/$defs/Types.KubeResources" + }, + "retentionTime": { + "type": "string", + "description": "The retention time for the `k8s` Prometheus instance." + }, + "retentionSize": { + "type": "string", + "description": "The retention size for the `k8s` Prometheus instance." + }, + "storageSize": { + "type": "string", + "description": "The storage size for the `k8s` Prometheus instance." + }, + "remoteWrite": { + "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", "type": "array", "items": { - "anyOf": [ - { - "$ref": "#/$defs/Types.SshPubKey" - }, - { - "$ref": "#/$defs/Types.FileRef" - } - ] - }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "type": "object" + } + } + } + }, + "Spec.Distribution.Modules.Monitoring.PrometheusAgent": { + "type": "object", + "additionalProperties": false, + "properties": { + "resources": { + "$ref": "#/$defs/Types.KubeResources" }, - "githubUsersName": { + "remoteWrite": { + "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", "type": "array", "items": { - "type": "string" - }, - "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "type": "object" + } + } + } + }, + "Spec.Distribution.Modules.Monitoring.AlertManager": { + "type": "object", + "additionalProperties": false, + "properties": { + "deadManSwitchWebhookUrl": { + "type": "string", + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, - "allowedFromCidrs": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "installDefaultRules": { + "type": "boolean", + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." + }, + "slackWebhookUrl": { + "type": "string", + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." 
        }
-      },
-      "required": [
-        "allowedFromCidrs",
-        "githubUsersName"
-      ]
+      }
     },
-    "Spec.Kubernetes": {
+    "Spec.Distribution.Modules.Monitoring.Grafana": {
       "type": "object",
       "additionalProperties": false,
       "properties": {
-        "vpcId": {
-          "$ref": "#/$defs/Types.AwsVpcId",
-          "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
+        "usersRoleAttributePath": {
+          "type": "string",
+          "description": "[JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's role. Example:\n\n```yaml\nusersRoleAttributePath: \"contains(groups[*], 'beta') && 'Admin' || contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && 'Viewer'\"\n```\n\nMore details in [Grafana's documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping)."
         },
-        "clusterIAMRoleNamePrefixOverride": {
-          "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
-          "description": "Overrides the default IAM role name prefix for the EKS cluster"
+        "basicAuthIngress": {
+          "type": "boolean",
+          "description": "Setting this to true will deploy an additional `grafana-basic-auth` ingress protected with Grafana's basic auth instead of SSO. Its intended use is as a temporary ingress for when there are problems with the SSO login flow.\n\nNotice that by default anonymous access is enabled."
         },
-        "workersIAMRoleNamePrefixOverride": {
-          "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
-          "description": "Overrides the default IAM role name prefix for the EKS workers"
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+        }
+      }
+    },
+    "Spec.Distribution.Modules.Monitoring.BlackboxExporter": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+        }
+      }
+    },
+    "Spec.Distribution.Modules.Monitoring.KubeStateMetrics": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+        }
+      }
+    },
+    "Spec.Distribution.Modules.Monitoring.X509Exporter": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+        }
+      }
+    },
+    "Spec.Distribution.Modules.Monitoring.Mimir": {
+      "type": "object",
+      "additionalProperties": false,
+      "description": "Configuration for the Mimir package.",
+      "properties": {
+        "retentionTime": {
+          "type": "string",
+          "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days."
        },
-        "subnetIds": {
-          "type": "array",
-          "items": {
-            "$ref": "#/$defs/Types.AwsSubnetId"
-          },
-          "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
+        "backend": {
+          "type": "string",
+          "enum": [
+            "minio",
+            "externalEndpoint"
+          ],
+          "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
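For reference, a minimal hypothetical sketch of the Mimir options above in a furyctl configuration file; the retention value is illustrative, not a default:

```yaml
# Hypothetical furyctl.yaml fragment.
spec:
  distribution:
    modules:
      monitoring:
        type: mimir
        mimir:
          retentionTime: 90d   # must match [0-9]+(ns|us|µs|ms|s|m|h|d|w|y)
          backend: minio       # use the in-cluster MinIO deployment for object storage
```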
}, - "apiServer": { - "$ref": "#/$defs/Spec.Kubernetes.APIServer" + "externalEndpoint": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", + "properties": { + "endpoint": { + "type": "string", + "description": "The external S3-compatible endpoint for Mimir's storage." + }, + "insecure": { + "type": "boolean", + "description": "If true, will use HTTP as protocol instead of HTTPS." + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key (password) for the external S3-compatible bucket." + }, + "accessKeyId": { + "type": "string", + "description": "The access key ID (username) for the external S3-compatible bucket." + }, + "bucketName": { + "type": "string", + "description": "The bucket name of the external S3-compatible object storage." + } + } }, - "serviceIpV4Cidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.Minio": { + "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", + "additionalProperties": false, + "properties": { + "storageSize": { + "type": "string", + "description": "The PVC size for each MinIO disk, 6 disks total." }, - "nodeAllowedSshPublicKey": { - "anyOf": [ - { - "$ref": "#/$defs/Types.AwsSshPubKey" + "rootUser": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username for the default MinIO root user." }, - { - "$ref": "#/$defs/Types.FileRef" + "password": { + "type": "string", + "description": "The password for the default MinIO root user." } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Tracing": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the Tracing module.", + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + }, + "type": { + "type": "string", + "enum": [ + "none", + "tempo" ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, - "nodePoolsLaunchKind": { + "tempo": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" + }, + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Minio" + } + }, + "required": [ + "type" + ] + }, + "Spec.Distribution.Modules.Tracing.Tempo": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the Tempo package.", + "properties": { + "retentionTime": { + "type": "string", + "description": "The retention time for the traces stored in Tempo." + }, + "backend": { "type": "string", "enum": [ - "launch_configurations", - "launch_templates", - "both" + "minio", + "externalEndpoint" ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, - "logRetentionDays": { - "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "type": "string", + "description": "The external S3-compatible endpoint for Tempo's storage." + }, + "insecure": { + "type": "boolean", + "description": "If true, will use HTTP as protocol instead of HTTPS." + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key (password) for the external S3-compatible bucket." + }, + "accessKeyId": { + "type": "string", + "description": "The access key ID (username) for the external S3-compatible bucket." + }, + "bucketName": { + "type": "string", + "description": "The bucket name of the external S3-compatible object storage." + } + } }, - "logsTypes": { - "type": "array", - "items": { - "type": "string", - "enum": [ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler" - ] - }, - "minItems": 0, - "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types." + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Tracing.Minio": { + "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", + "additionalProperties": false, + "properties": { + "storageSize": { + "type": "string", + "description": "The PVC size for each MinIO disk, 6 disks total." }, - "nodePools": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool" + "rootUser": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username for the default MinIO root user." + }, + "password": { + "type": "string", + "description": "The password for the default MinIO root user." 
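Tying the Tempo options above together, a hypothetical furyctl configuration fragment might look as follows; the endpoint, credentials and bucket name are placeholders:

```yaml
# Hypothetical furyctl.yaml fragment, all values are placeholders.
spec:
  distribution:
    modules:
      tracing:
        type: tempo
        tempo:
          retentionTime: 72h
          backend: externalEndpoint
          externalEndpoint:
            endpoint: s3.example.com          # external S3-compatible service
            insecure: false                   # keep HTTPS as the protocol
            accessKeyId: tempo-user
            secretAccessKey: example-password
            bucketName: tempo-traces
```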
+ } } }, - "awsAuth": { - "$ref": "#/$defs/Spec.Kubernetes.AwsAuth" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - }, - "required": [ - "apiServer", - "nodeAllowedSshPublicKey", - "nodePools", - "nodePoolsLaunchKind" - ] + } }, - "Spec.Kubernetes.APIServer": { + "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { - "privateAccess": { - "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" - }, - "privateAccessCidrs": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" }, - "publicAccessCidrs": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "tigeraOperator": { + "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" }, - "publicAccess": { - "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "type": { + "type": "string", + "enum": [ + "none" + ] } - }, - "required": [ - "privateAccess", - "publicAccess" - ] + } }, - "Spec.Kubernetes.AwsAuth": { + "Spec.Distribution.Modules.Networking.TigeraOperator": { "type": "object", "additionalProperties": false, "properties": { - "additionalAccounts": { - "type": "array", - "items": { - "type": "string" - }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" - }, - "users": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" - }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" - }, - "roles": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" - }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } } }, - "Spec.Kubernetes.AwsAuth.Role": { + "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { - "username": { - "type": "string" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" }, - "groups": { - "type": "array", - "items": { - "type": "string" - } + "type": { + "type": "string", + "enum": [ + "none", + "gatekeeper", + "kyverno" + ], + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." 
}, - "rolearn": { - "$ref": "#/$defs/Types.AwsArn" + "gatekeeper": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" + }, + "kyverno": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Kyverno" } }, "required": [ - "groups", - "rolearn", - "username" + "type" + ], + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "gatekeeper" + } + } + }, + "then": { + "required": [ + "gatekeeper" + ] + } + }, + { + "if": { + "properties": { + "type": { + "const": "kyverno" + } + } + }, + "then": { + "required": [ + "kyverno" + ] + } + } ] }, - "Spec.Kubernetes.AwsAuth.User": { + "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { - "username": { - "type": "string" - }, - "groups": { + "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" - } + }, + "description": "This parameter adds namespaces to Gatekeeper's exemption list, so it will not enforce the constraints on them." }, - "userarn": { - "$ref": "#/$defs/Types.AwsArn" + "enforcementAction": { + "type": "string", + "enum": [ + "deny", + "dryrun", + "warn" + ], + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." + }, + "installDefaultPolicies": { + "type": "boolean", + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "groups", - "userarn", - "username" + "enforcementAction", + "installDefaultPolicies" ] }, - "Spec.Kubernetes.NodePool": { + "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { - "type": { - "type": "string", - "enum": [ - "eks-managed", - "self-managed" - ] - }, - "name": { - "type": "string", - "description": "The name of the node pool" - }, - "ami": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" - }, - "containerRuntime": { - "type": "string", - "enum": [ - "docker", - "containerd" - ], - "description": "The container runtime to use for the nodes" - }, - "size": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" - }, - "instance": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance" - }, - "attachedTargetGroups": { + "additionalExcludedNamespaces": { "type": "array", "items": { - "$ref": "#/$defs/Types.AwsArn" + "type": "string" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." 
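A hypothetical furyctl configuration fragment for the Gatekeeper options defined above could look like this; the excluded namespace name is a placeholder:

```yaml
# Hypothetical furyctl.yaml fragment, the namespace name is a placeholder.
spec:
  distribution:
    modules:
      policy:
        type: gatekeeper
        gatekeeper:
          enforcementAction: deny        # block admission when violations are found
          installDefaultPolicies: true   # also install the bundled constraint templates and constraints
          additionalExcludedNamespaces:
            - legacy-apps                # skip enforcement in this namespace
```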
         },
-        "labels": {
-          "$ref": "#/$defs/Types.KubeLabels",
-          "description": "Kubernetes labels that will be added to the nodes"
-        },
-        "taints": {
-          "$ref": "#/$defs/Types.KubeTaints",
-          "description": "Kubernetes taints that will be added to the nodes"
-        },
-        "tags": {
-          "$ref": "#/$defs/Types.AwsTags",
-          "description": "AWS tags that will be added to the ASG and EC2 instances"
+        "validationFailureAction": {
+          "type": "string",
+          "enum": [
+            "Audit",
+            "Enforce"
+          ],
+          "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies."
         },
-        "subnetIds": {
-          "type": "array",
-          "items": {
-            "$ref": "#/$defs/Types.AwsSubnetId"
-          },
-          "description": "This value defines the subnet IDs where the nodes will be created"
+        "installDefaultPolicies": {
+          "type": "boolean",
+          "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution."
         },
-        "additionalFirewallRules": {
-          "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules"
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
         }
       },
       "required": [
-        "instance",
-        "name",
-        "size"
+        "validationFailureAction",
+        "installDefaultPolicies"
       ]
     },
-    "Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock": {
+    "Spec.Distribution.Modules.Dr": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Disaster Recovery module.",
       "properties": {
-        "name": {
-          "type": "string"
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleOverrides"
         },
         "type": {
           "type": "string",
           "enum": [
-            "ingress",
-            "egress"
-          ]
-        },
-        "tags": {
-          "$ref": "#/$defs/Types.AwsTags"
-        },
-        "cidrBlocks": {
-          "type": "array",
-          "items": {
-            "$ref": "#/$defs/Types.Cidr"
-          },
-          "minItems": 1
-        },
-        "protocol": {
-          "$ref": "#/$defs/Types.AwsIpProtocol"
+            "none",
+            "eks"
+          ],
+          "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`."
         },
-        "ports": {
-          "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
+        "velero": {
+          "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero"
         }
       },
       "required": [
-        "cidrBlocks",
-        "name",
-        "ports",
-        "protocol",
         "type"
-      ]
+      ],
+      "if": {
+        "properties": {
+          "type": {
+            "const": "eks"
+          }
+        }
+      },
+      "then": {
+        "required": [
+          "type",
+          "velero"
+        ]
+      }
     },
-    "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": {
+    "Spec.Distribution.Modules.Dr.Velero": {
       "type": "object",
       "additionalProperties": false,
       "properties": {
-        "from": {
-          "$ref": "#/$defs/Types.TcpPort"
+        "schedules": {
+          "type": "object",
+          "additionalProperties": false,
+          "description": "Configuration for Velero's backup schedules.",
+          "properties": {
+            "install": {
+              "type": "boolean",
+              "description": "Whether or not to install the default `manifests` and `full` backup schedules. Default is `true`."
+            },
+            "definitions": {
+              "type": "object",
+              "additionalProperties": false,
+              "description": "Configuration for Velero schedules.",
+              "properties": {
+                "manifests": {
+                  "type": "object",
+                  "additionalProperties": false,
+                  "description": "Configuration for Velero's manifests backup schedule.",
+                  "properties": {
+                    "schedule": {
+                      "type": "string",
+                      "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+                    },
+                    "ttl": {
+                      "type": "string",
+                      "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL."
+                    }
+                  }
+                },
+                "full": {
+                  "type": "object",
+                  "additionalProperties": false,
+                  "description": "Configuration for Velero's full backup schedule.",
+                  "properties": {
+                    "schedule": {
+                      "type": "string",
+                      "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+                    },
+                    "ttl": {
+                      "type": "string",
+                      "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL."
+                    },
+                    "snapshotMoveData": {
+                      "type": "boolean",
+                      "description": "EXPERIMENTAL (if you do more than one backup, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation."
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
-        "to": {
-          "$ref": "#/$defs/Types.TcpPort"
+        "eks": {
+          "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks"
+        },
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
         }
       },
       "required": [
-        "from",
-        "to"
+        "eks"
       ]
     },
-    "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self": {
+    "Spec.Distribution.Modules.Dr.Velero.Eks": {
       "type": "object",
       "additionalProperties": false,
       "properties": {
-        "name": {
-          "type": "string",
-          "description": "The name of the FW rule"
-        },
-        "type": {
-          "type": "string",
-          "enum": [
-            "ingress",
-            "egress"
-          ],
-          "description": "The type of the FW rule can be ingress or egress"
-        },
-        "tags": {
-          "$ref": "#/$defs/Types.AwsTags",
-          "description": "The tags of the FW rule"
-        },
-        "self": {
-          "type": "boolean",
-          "description": "If true, the source will be the security group itself"
+        "region": {
+          "$ref": "#/$defs/Types.AwsRegion",
+          "description": "The region where the bucket for Velero will be located."
         },
-        "protocol": {
-          "$ref": "#/$defs/Types.AwsIpProtocol",
-          "description": "The protocol of the FW rule"
+        "bucketName": {
+          "$ref": "#/$defs/Types.AwsS3BucketName",
+          "maxLength": 49,
+          "description": "The name of the bucket for Velero."
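Putting the Disaster Recovery options above together, a hypothetical furyctl configuration fragment could look like the following; the region, bucket name and role ARN are placeholders, and `iamRoleArn` refers to the property defined right below:

```yaml
# Hypothetical furyctl.yaml fragment, all values are placeholders.
spec:
  distribution:
    modules:
      dr:
        type: eks
        velero:
          eks:
            region: eu-west-1
            bucketName: my-cluster-velero
            iamRoleArn: arn:aws:iam::123456789012:role/velero-example
          schedules:
            install: true
            definitions:
              full:
                schedule: "0 1 * * *"   # nightly full backup
                ttl: 720h0m0s           # keep backups for 30 days
```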
}, - "ports": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" } }, "required": [ - "self", - "name", - "ports", - "protocol", - "type" + "iamRoleArn", + "region", + "bucketName" ] }, - "Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId": { + "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { - "name": { - "type": "string", - "description": "The name of the FW rule" - }, - "type": { - "type": "string", - "enum": [ - "ingress", - "egress" - ], - "description": "The type of the FW rule can be ingress or egress" + "overrides": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" }, - "tags": { - "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "provider": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider" }, - "sourceSecurityGroupId": { + "baseDomain": { "type": "string", - "description": "The source security group ID" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, - "protocol": { - "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "pomerium": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" }, - "ports": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + "dex": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Dex" } }, "required": [ - "sourceSecurityGroupId", - "name", - "ports", - "protocol", - "type" + "provider" + ], + "allOf": [ + { + "if": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "sso" + } + } + } + } + }, + "then": { + "required": [ + "dex", + "pomerium", + "baseDomain" + ] + }, + "else": { + "properties": { + "dex": { + "type": "null" + }, + "pomerium": { + "type": "null" + } + } + } + }, + { + "if": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "basicAuth" + } + } + } + } + }, + "then": { + "properties": { + "provider": { + "required": [ + "basicAuth" + ] + } + } + }, + "else": { + "properties": { + "provider": { + "basicAuth": { + "type": "null" + } + } + } + } + } ] }, - "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { + "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { - "cidrBlocks": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" - }, - "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "Set to override the node selector used to place the pods of the Auth module." }, - "sourceSecurityGroupId": { - "type": "array", + "tolerations": { + "type": [ + "array", + "null" + ], "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId" + "$ref": "#/$defs/Types.KubeToleration" }, - "minItems": 1 + "description": "Set to override the tolerations that will be added to the pods of the Auth module." 
}, - "self": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self" - }, - "minItems": 1 + "ingresses": { + "type": "object", + "description": "Override the definition of the Auth module ingresses.", + "additionalProperties": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" + } } } }, - "Spec.Kubernetes.NodePool.Ami": { + "Spec.Distribution.Modules.Auth.Overrides.Ingress": { "type": "object", "additionalProperties": false, "properties": { - "id": { + "host": { "type": "string", - "description": "The AMI ID to use for the nodes" + "description": "Use this host for the ingress instead of the default one." }, - "owner": { + "ingressClass": { "type": "string", - "description": "The owner of the AMI" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ - "id", - "owner" + "host", + "ingressClass" ] }, - "Spec.Kubernetes.NodePool.Instance": { + "Spec.Distribution.Modules.Auth.Provider": { "type": "object", "additionalProperties": false, "properties": { "type": { - "type": "string", - "description": "The instance type to use for the nodes" - }, - "spot": { - "type": "boolean", - "description": "If true, the nodes will be created as spot instances" - }, - "volumeSize": { - "type": "integer", - "description": "The size of the disk in GB" - }, - "volumeType": { "type": "string", "enum": [ - "gp2", - "gp3", - "io1", - "standard" - ] - }, - "maxPods": { - "type": "integer" - } - }, - "required": [ - "type" - ] - }, - "Spec.Kubernetes.NodePool.Size": { - "type": "object", - "additionalProperties": false, - "properties": { - "min": { - "type": "integer", - "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "none", + "basicAuth", + "sso" + ], + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, - "max": { - "type": "integer", - "minimum": 0, - "description": "The maximum number of nodes in the node pool" - } - }, - "required": [ - "max", - "min" - ] - }, - "Spec.ToolsConfiguration": { - "type": "object", - "additionalProperties": false, - "properties": { - "terraform": { - "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform" + "basicAuth": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" } }, "required": [ - "terraform" + "type" ] }, - "Spec.ToolsConfiguration.Terraform": { + "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { - "state": { - "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State" + "username": { + "type": "string", + "description": "The username for logging in with the HTTP basic authentication." + }, + "password": { + "type": "string", + "description": "The password for logging in with the HTTP basic authentication." 
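As a quick illustration of the Basic Auth provider defined above, a hypothetical furyctl configuration fragment; the credentials are placeholders to be replaced with your own:

```yaml
# Hypothetical furyctl.yaml fragment, credentials are placeholders.
spec:
  distribution:
    modules:
      auth:
        provider:
          type: basicAuth
          basicAuth:
            username: admin
            password: changeme-please
```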
        }
       },
       "required": [
-        "state"
+        "username",
+        "password"
       ]
     },
-    "Spec.ToolsConfiguration.Terraform.State": {
+    "Spec.Distribution.Modules.Auth.Pomerium": {
+      "$ref": "../public/spec-distribution-modules-auth-pomerium.json"
+    },
+    "Spec.Distribution.Modules.Auth.Dex": {
       "type": "object",
       "additionalProperties": false,
+      "description": "Configuration for the Dex package.",
       "properties": {
-        "s3": {
-          "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3"
+        "connectors": {
+          "type": "array",
+          "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/"
+        },
+        "additionalStaticClients": {
+          "type": "array",
+          "description": "Additional static client definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n  - id: my-custom-client\n    name: \"A custom additional static client\"\n    redirectURIs:\n      - \"https://myapp.tld/redirect\"\n      - \"https://alias.tld/oidc-callback\"\n    secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/"
+        },
+        "expiry": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "signingKeys": {
+              "type": "string",
+              "description": "Dex signing key expiration time duration (default 6h)."
+            },
+            "idTokens": {
+              "type": "string",
+              "description": "Dex ID tokens expiration time duration (default 24h)."
+            }
+          }
+        },
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
         }
       },
       "required": [
-        "s3"
+        "connectors"
       ]
     },
-    "Spec.ToolsConfiguration.Terraform.State.S3": {
+    "Spec.Distribution.Modules.Aws": {
       "type": "object",
       "additionalProperties": false,
       "properties": {
-        "bucketName": {
-          "$ref": "#/$defs/Types.AwsS3BucketName",
-          "description": "This value defines which bucket will be used to store all the states"
+        "clusterAutoscaler": {
+          "$ref": "#/$defs/Spec.Distribution.Modules.Aws.ClusterAutoscaler"
         },
-        "keyPrefix": {
-          "$ref": "#/$defs/Types.AwsS3KeyPrefix",
-          "description": "This value defines which folder will be used to store all the states inside the bucket"
+        "ebsCsiDriver": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "iamRoleArn": {
+              "$ref": "#/$defs/Types.AwsArn"
+            },
+            "overrides": {
+              "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
+            }
+          },
+          "required": [
+            "iamRoleArn"
+          ]
         },
-        "region": {
-          "$ref": "#/$defs/Types.AwsRegion",
-          "description": "This value defines in which region the bucket is located"
+        "loadBalancerController": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "iamRoleArn": {
+              "$ref": "#/$defs/Types.AwsArn"
+            },
+            "overrides": {
+              "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
+            }
+          },
+          "required": [
+            "iamRoleArn"
+          ]
         },
-        "skipRegionValidation": {
-          "type": "boolean",
-          "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region"
+        "ebsSnapshotController": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "overrides": {
+              "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+            }
+          }
+        },
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleOverrides"
         }
       },
       "required": [
-        "bucketName",
-        "keyPrefix",
-        "region"
+        "clusterAutoscaler",
+        "ebsCsiDriver",
+        "loadBalancerController",
+        "overrides"
       ]
     },
-    "Types.AwsArn": {
+    "Types.SemVer": {
       "type": "string",
-      "pattern": "^arn:(?P<Partition>[^:\\n]*):(?P<Service>[^:\\n]*):(?P<Region>[^:\\n]*):(?P<AccountID>[^:\\n]*):(?P<Ignore>(?P<ResourceType>[^:\\/\\n]*)[:\\/])?(?P<Resource>.*)$"
"^arn:(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P(?P[^:\\/\\n]*)[:\\/])?(?P.*)$" + "pattern": "^v?(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)(?:-(?P(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" }, - "Types.AwsIamRoleName": { + "Types.IpAddress": { "type": "string", - "pattern": "^[a-zA-Z0-9+=,.@_-]{1,63}$" + "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\b){4}$" }, - "Types.AwsIamRoleNamePrefix": { + "Types.Cidr": { "type": "string", - "pattern": "^[a-zA-Z0-9+=,.@_-]{1,38}$" + "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}\\/(3[0-2]|[1-2][0-9]|[0-9])$" }, - "Types.AwsIpProtocol": { + "Types.FileRef": { "type": "string", - "pattern": "^(?i)(tcp|udp|icmp|icmpv6|-1)$", - "$comment": "this value should be lowercase, but we rely on terraform to do the conversion to make it a bit more user friendly" + "pattern": "^\\{file\\:\\/\\/.+\\}$" + }, + "Types.EnvRef": { + "type": "string", + "pattern": "\\{^env\\:\\/\\/.*\\}$" + }, + "Types.TcpPort": { + "type": "integer", + "minimum": 0, + "maximum": 65535 + }, + "Types.SshPubKey": { + "type": "string", + "pattern": "^ssh\\-(dsa|ecdsa|ecdsa-sk|ed25519|ed25519-sk|rsa)\\s+" + }, + "Types.Uri": { + "type": "string", + "pattern": "^(http|https)\\:\\/\\/.+$" + }, + "Types.AwsArn": { + "type": "string", + "pattern": "^arn:(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P(?P[^:\\/\\n]*)[:\\/])?(?P.*)$" }, "Types.AwsRegion": { "type": "string", @@ -2499,6 +2784,37 @@ "us-west-2" ] }, + "Types.AwsVpcId": { + "type": "string", + "pattern": "^vpc\\-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "Types.AwsSshPubKey": { + "type": "string", + "pattern": "^ssh\\-(ed25519|rsa)\\s+" + }, + "Types.AwsSubnetId": { + "type": "string", + "pattern": "^subnet\\-[0-9a-f]{17}$" + }, + "Types.AwsTags": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "Types.AwsIpProtocol": { + "type": "string", + "pattern": "^(?i)(tcp|udp|icmp|icmpv6|-1)$", + "$comment": "this value should be lowercase, but we rely on terraform to do the conversion to make it a bit more user friendly" + }, + "Types.AwsIamRoleNamePrefix": { + "type": "string", + "pattern": "^[a-zA-Z0-9+=,.@_-]{1,38}$" + }, + "Types.AwsIamRoleName": { + "type": "string", + "pattern": "^[a-zA-Z0-9+=,.@_-]{1,63}$" + }, "Types.AwsS3BucketName": { "type": "string", "allOf": [ @@ -2506,67 +2822,139 @@ "pattern": "^[a-z0-9][a-z0-9-.]{1,61}[a-z0-9]$" }, { - "not": { - "pattern": "^xn--|-s3alias$" - } + "not": { + "pattern": "^xn--|-s3alias$" + } + } + ] + }, + "Types.AwsS3BucketNamePrefix": { + "type": "string", + "allOf": [ + { + "pattern": "^[a-z0-9][a-z0-9-.]{1,35}[a-z0-9-.]$" + }, + { + "not": { + "pattern": "^xn--|-s3alias$" + } + } + ] + }, + "Types.AwsS3KeyPrefix": { + "type": "string", + "pattern": "^[A-z0-9][A-z0-9!-_.*'()]+$", + "maxLength": 960 + }, + "Types.KubeLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "Types.KubeTaints": { + "type": "array", + "items": { + "type": "string", + "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=([^-][\\w-]+):(NoSchedule|PreferNoSchedule|NoExecute)$" + } + }, + "Types.KubeNodeSelector": { + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "Types.KubeToleration": { + "type": "object", + "additionalProperties": false, + "properties": { + "effect": { + "type": "string", + "enum": [ + "NoSchedule", + "PreferNoSchedule", + "NoExecute" + ] + }, + "operator": { + 
"type": "string", + "enum": [ + "Exists", + "Equal" + ] + }, + "key": { + "type": "string", + "description": "The key of the toleration" + }, + "value": { + "type": "string", + "description": "The value of the toleration" + } + }, + "required": [ + "effect", + "key" + ], + "anyOf": [ + { + "required": [ + "operator" + ] + }, + { + "required": [ + "value" + ] } ] }, - "Types.AwsS3BucketNamePrefix": { - "type": "string", - "allOf": [ - { - "pattern": "^[a-z0-9][a-z0-9-.]{1,35}[a-z0-9-.]$" + "Types.KubeResources": { + "type": "object", + "additionalProperties": false, + "properties": { + "requests": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string", + "description": "The CPU request for the Pod, in cores. Example: `500m`." + }, + "memory": { + "type": "string", + "description": "The memory request for the Pod. Example: `500M`." + } + } }, - { - "not": { - "pattern": "^xn--|-s3alias$" + "limits": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string", + "description": "The CPU limit for the Pod. Example: `1000m`." + }, + "memory": { + "type": "string", + "description": "The memory limit for the Pod. Example: `1G`." + } } } - ] - }, - "Types.AwsS3KeyPrefix": { - "type": "string", - "pattern": "^[A-z0-9][A-z0-9!-_.*'()]+$", - "maxLength": 960 - }, - "Types.AwsSshPubKey": { - "type": "string", - "pattern": "^ssh\\-(ed25519|rsa)\\s+" - }, - "Types.AwsSubnetId": { - "type": "string", - "pattern": "^subnet\\-[0-9a-f]{17}$" - }, - "Types.AwsTags": { - "type": "object", - "additionalProperties": { - "type": "string" } }, - "Types.AwsVpcId": { - "type": "string", - "pattern": "^vpc\\-([0-9a-f]{8}|[0-9a-f]{17})$" - }, - "Types.Cidr": { - "type": "string", - "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}\\/(3[0-2]|[1-2][0-9]|[0-9])$" - }, - "Types.EnvRef": { - "type": "string", - "pattern": "\\{^env\\:\\/\\/.*\\}$" - }, - "Types.FileRef": { - "type": "string", - "pattern": "^\\{file\\:\\/\\/.+\\}$" - }, - "Types.FuryModuleComponentOverrides": { + "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2576,17 +2964,23 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the module." + }, + "ingresses": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/Types.FuryModuleOverridesIngress" + } } } }, - "Types.FuryModuleComponentOverridesWithIAMRoleName": { + "Types.FuryModuleComponentOverrides": { "type": "object", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "Set to override the node selector used to place the pods of the package." 
}, "tolerations": { "type": [ @@ -2596,20 +2990,17 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" - }, - "iamRoleName": { - "$ref": "#/$defs/Types.AwsIamRoleName" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, - "Types.FuryModuleOverrides": { + "Types.FuryModuleComponentOverridesWithIAMRoleName": { "type": "object", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2619,13 +3010,10 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." }, - "ingresses": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/Types.FuryModuleOverridesIngress" - } + "iamRoleName": { + "$ref": "#/$defs/Types.AwsIamRoleName" } } }, @@ -2635,167 +3023,68 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." 
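To show how the overrides types above fit together, here is a hypothetical furyctl configuration fragment; the node label, toleration values, ingress key and host are placeholders:

```yaml
# Hypothetical furyctl.yaml fragment, all values are placeholders.
spec:
  distribution:
    modules:
      monitoring:
        overrides:
          nodeSelector:
            node.kubernetes.io/role: infra      # place the module's pods on infra nodes
          tolerations:
            - key: node.kubernetes.io/role
              operator: Equal
              value: infra
              effect: NoSchedule
          ingresses:
            prometheus:                          # one entry per ingress to override
              host: prometheus.internal.example.com
              ingressClass: internal
              disableAuth: false                 # keep SSO / basic auth in front of it
```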
        }
      }
    },
-    "Types.IpAddress": {
-      "type": "string",
-      "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\b){4}$"
-    },
-    "Types.KubeLabels": {
-      "type": "object",
-      "additionalProperties": {
-        "type": "string"
-      }
-    },
-    "Types.KubeNodeSelector": {
-      "type": [
-        "object",
-        "null"
-      ],
-      "additionalProperties": {
-        "type": "string"
-      }
-    },
-    "Types.KubeResources": {
+    "Spec.Distribution.Modules.Aws.ClusterAutoscaler": {
       "type": "object",
       "additionalProperties": false,
       "properties": {
-        "requests": {
-          "type": "object",
-          "additionalProperties": false,
-          "properties": {
-            "cpu": {
-              "type": "string",
-              "description": "The cpu request for the prometheus pods"
-            },
-            "memory": {
-              "type": "string",
-              "description": "The memory request for the opensearch pods"
-            }
-          }
+        "iamRoleArn": {
+          "$ref": "#/$defs/Types.AwsArn"
         },
-        "limits": {
-          "type": "object",
-          "additionalProperties": false,
-          "properties": {
-            "cpu": {
-              "type": "string",
-              "description": "The cpu limit for the opensearch pods"
-            },
-            "memory": {
-              "type": "string",
-              "description": "The memory limit for the opensearch pods"
-            }
-          }
+        "overrides": {
+          "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
         }
-      }
-    },
-    "Types.KubeTaints": {
-      "type": "array",
-      "items": {
-        "type": "string",
-        "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=([^-][\\w-]+):(NoSchedule|PreferNoSchedule|NoExecute)$"
-      }
+      },
+      "required": [
+        "iamRoleArn"
+      ]
     },
-    "Types.KubeToleration": {
+    "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": {
       "type": "object",
       "additionalProperties": false,
       "properties": {
-        "effect": {
-          "type": "string",
-          "enum": [
-            "NoSchedule",
-            "PreferNoSchedule",
-            "NoExecute"
-          ]
-        },
-        "operator": {
-          "type": "string",
-          "enum": [
-            "Exists",
-            "Equal"
-          ]
+        "iamRoleArn": {
+          "$ref": "#/$defs/Types.AwsArn"
         },
-        "key": {
-          "type": "string",
-          "description": "The key of the toleration"
+        "region": {
+          "$ref": "#/$defs/Types.AwsRegion"
         },
-        "value": {
-          "type": "string",
-          "description": "The value of the toleration"
+        "hostedZoneId": {
+          "type": "string"
         }
       },
       "required": [
-        "effect",
-        "key"
-      ],
-      "anyOf": [
-        {
-          "required": [
-            "operator"
-          ]
-        },
-        {
-          "required": [
-            "value"
-          ]
-        }
+        "hostedZoneId",
+        "iamRoleArn",
+        "region"
       ]
     },
-    "Types.SemVer": {
-      "type": "string",
-      "pattern": "^v?(?P<major>0|[1-9]\\d*)\\.(?P<minor>0|[1-9]\\d*)\\.(?P<patch>0|[1-9]\\d*)(?:-(?P<prerelease>(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$"
-    },
-    "Types.SshPubKey": {
-      "type": "string",
-      "pattern": "^ssh\\-(dsa|ecdsa|ecdsa-sk|ed25519|ed25519-sk|rsa)\\s+"
-    },
-    "Types.TcpPort": {
-      "type": "integer",
-      "minimum": 0,
-      "maximum": 65535
-    },
-    "Types.Uri": {
-      "type": "string",
-      "pattern": "^(http|https)\\:\\/\\/.+$"
-    }
-  },
-  "$schema": "http://json-schema.org/draft-07/schema#",
-  "additionalProperties": false,
-  "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service",
-  "properties": {
-    "apiVersion": {
-      "type": "string",
-      "pattern": "^kfd\\.sighup\\.io/v\\d+((alpha|beta)\\d+)?$"
-    },
-    "kind": {
-      "type": "string",
-      "enum": [
-        "EKSCluster"
+    "Spec.Distribution.Modules.Ingress.ExternalDNS": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "privateIamRoleArn": {
+          "$ref": "#/$defs/Types.AwsArn"
+        },
+        "publicIamRoleArn": {
+          "$ref": "#/$defs/Types.AwsArn"
+        }
+      },
+      "required": [
+        "privateIamRoleArn",
+        "publicIamRoleArn"
      ]
-    },
-    "metadata": {
-      "$ref": "#/$defs/Metadata"
-    },
-    "spec": {
-      "$ref": 
"#/$defs/Spec" } - }, - "required": [ - "apiVersion", - "kind", - "metadata", - "spec" - ], - "type": "object" + } } diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 74266be21..d37497bc4 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." }, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -155,6 +159,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -167,22 +172,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." }, "skipRegionValidation": { "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." 
} }, "required": [ @@ -196,12 +202,10 @@ "additionalProperties": false, "properties": { "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" + "$ref": "#/$defs/Spec.Infrastructure.Vpc" }, "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" + "$ref": "#/$defs/Spec.Infrastructure.Vpn" } }, "allOf": [ @@ -279,6 +283,7 @@ }, "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.", "additionalProperties": false, "properties": { "network": { @@ -295,7 +300,7 @@ "properties": { "cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This is the CIDR of the VPC that will be created" + "description": "The network CIDR for the VPC that will be created" }, "subnetsCidrs": { "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" @@ -308,6 +313,7 @@ }, "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { "type": "object", + "description": "Network CIDRS configuration for private and public subnets.", "additionalProperties": false, "properties": { "private": { @@ -315,14 +321,14 @@ "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, "public": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" } }, "required": [ @@ -332,50 +338,51 @@ }, "Spec.Infrastructure.Vpn": { "type": "object", + "description": "Configuration for the VPN server instances.", "additionalProperties": false, "properties": { "instances": { "type": "integer", - "description": "The number of instances to create, 0 to skip the creation" + "description": "The number of VPN server instances to create, `0` to skip the creation." }, "port": { "$ref": "#/$defs/Types.TcpPort", - "description": "The port used by the OpenVPN server" + "description": "The port where each OpenVPN server will listen for connections." }, "instanceType": { "type": "string", - "description": "The size of the AWS EC2 instance" + "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`." }, "diskSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB." }, "operatorName": { "type": "string", - "description": "The username of the account to create in the bastion's operating system" + "description": "The username of the account to create in the bastion's operating system." }, "dhParamsBits": { "type": "integer", - "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file." 
}, "vpnClientsSubnetCidr": { "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected." }, "ssh": { "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" }, "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." }, "bucketNamePrefix": { "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." }, "iamUserNameOverride": { "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "Overrides IAM user name for the VPN. Default is to use the cluster name." } }, "required": [ @@ -399,7 +406,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -407,14 +414,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user." }, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source." } }, "required": [ @@ -424,33 +431,34 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." }, "clusterIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." }, "workersIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." 
}, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created." }, "apiServer": { "$ref": "#/$defs/Spec.Kubernetes.APIServer" }, "serviceIpV4Cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." }, "nodeAllowedSshPublicKey": { "anyOf": [ @@ -461,7 +469,7 @@ "$ref": "#/$defs/Types.FileRef" } ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file." }, "nodePoolsLaunchKind": { "type": "string", @@ -470,11 +478,44 @@ "launch_templates", "both" ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + }, + "nodePoolGlobalAmiType": { + "type": "string", + "enum": [ + "alinux2", + "alinux2023" + ], + "description": "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool." }, "logRetentionDays": { "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.", + "enum": [ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] }, "logsTypes": { "type": "array", @@ -505,7 +546,8 @@ "apiServer", "nodeAllowedSshPublicKey", "nodePools", - "nodePoolsLaunchKind" + "nodePoolsLaunchKind", + "nodePoolGlobalAmiType" ] }, "Spec.Kubernetes.APIServer": { @@ -514,7 +556,7 @@ "properties": { "privateAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default it `true`." }, "privateAccessCidrs": { "type": "array", @@ -522,7 +564,7 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "description": "The network CIDRs from the private subnets that will be allowed access the Kubernetes API server." 
}, "publicAccessCidrs": { "type": "array", @@ -530,11 +572,11 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "description": "The network CIDRs from the public subnets that will be allowed access the Kubernetes API server." }, "publicAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`." } }, "required": [ @@ -545,8 +587,10 @@ "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, + "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.", "properties": { "type": { + "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`.", "type": "string", "enum": [ "eks-managed", @@ -555,7 +599,7 @@ }, "name": { "type": "string", - "description": "The name of the node pool" + "description": "The name of the node pool." }, "ami": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" @@ -566,7 +610,7 @@ "docker", "containerd" ], - "description": "The container runtime to use for the nodes" + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." }, "size": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" @@ -579,26 +623,26 @@ "items": { "$ref": "#/$defs/Types.AwsArn" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This optional array defines additional target groups to attach to the instances in the node pool." }, "labels": { "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" + "description": "Kubernetes labels that will be added to the nodes." }, "taints": { "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" + "description": "Kubernetes taints that will be added to the nodes." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." 
}, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -607,45 +651,124 @@ "required": [ "instance", "name", - "size" - ] + "size", + "type" + ], + "if": { + "allOf": [ + { + "properties": { + "type": { + "enum": [ + "eks-managed" + ] + } + } + } + ] + }, + "then": { + "properties": { + "ami": { + "properties": { + "id": { + "type": "null" + }, + "owner": { + "type": "null" + } + } + } + } + } }, "Spec.Kubernetes.NodePool.Ami": { "type": "object", "additionalProperties": false, + "description": "Configuration for customize the Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI (just with `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.", "properties": { "id": { "type": "string", - "description": "The AMI ID to use for the nodes" + "description": "The ID of the AMI to use for the nodes, must be set toghether with the `owner` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`." }, "owner": { "type": "string", - "description": "The owner of the AMI" + "description": "The owner of the AMI to use for the nodes, must be set toghether with the `id` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`." + }, + "type": { + "type": "string", + "description": "The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at the same time than `ami.id` and `ami.owner`.", + "enum": [ + "alinux2", + "alinux2023" + ] } }, - "required": [ - "id", - "owner" + "oneOf": [ + { + "allOf": [ + { + "required": [ + "id", + "owner" + ] + }, + { + "not": { + "required": [ + "type" + ] + } + } + ] + }, + { + "allOf": [ + { + "required": [ + "type" + ] + }, + { + "not": { + "anyOf": [ + { + "required": [ + "id" + ] + }, + { + "required": [ + "owner" + ] + } + ] + } + } + ] + } ] }, "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." }, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." }, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -654,7 +777,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -668,12 +792,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." 
}, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." } }, "required": [ @@ -684,6 +808,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -691,7 +816,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." }, "sourceSecurityGroupId": { "type": "array", @@ -718,13 +844,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -754,7 +882,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -762,19 +890,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -794,7 +922,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." }, "type": { "type": "string", @@ -802,19 +930,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." 
}, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -830,6 +958,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -846,6 +975,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -853,21 +983,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." }, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." } } }, @@ -1004,28 +1134,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -1035,7 +1166,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -1090,14 +1221,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -1108,47 +1240,102 @@ }, "required": [ "baseDomain", - "dns", "nginx" ], - "if": { - "properties": { - "nginx": { + "allOf": [ + { + "if": { "properties": { - "tls": { + "nginx": { "properties": { - "provider": { - "const": "certManager" + "type": { + "const": "dual" + } + } + } + } + }, + "then": { + "required": [ + "dns" + ], + "properties": { + "dns": { + "required": [ + "public", + "private" + ] + } + } + } + }, + { + "if": { + "properties": { + "nginx": { + "properties": { + "type": { + "const": "single" + } + } + } + } + }, + "then": { + "required": [ + "dns" + ], + "properties": { + "dns": { + "required": [ + "public" + ] + } + } + } + }, + { + "if": { + "properties": { + "nginx": { + "properties": { + "tls": { + "properties": { + "provider": { + "const": "certManager" + } + } } } } } + }, + "then": { + "required": [ + "certManager" + ] } } - }, - "then": { - "required": [ - "certManager" - ] - } + ] }, "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." 
} } }, @@ -1181,7 +1368,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -1205,7 +1392,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -1230,16 +1417,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1251,6 +1441,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -1266,15 +1457,16 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", @@ -1282,11 +1474,11 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." 
}, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -1308,6 +1500,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1319,11 +1512,7 @@ "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - }, - "required": [ - "public", - "private" - ] + } }, "Spec.Distribution.Modules.Ingress.DNS.Public": { "type": "object", @@ -1331,11 +1520,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." } }, "required": [ @@ -1345,15 +1534,16 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object", + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", "additionalProperties": false, "properties": { "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." }, "create": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." } }, "required": [ @@ -1364,6 +1554,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1376,7 +1567,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." 
}, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1415,6 +1606,20 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, { "if": { "properties": { @@ -1441,14 +1646,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1460,6 +1665,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1470,10 +1676,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1481,11 +1688,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1496,10 +1703,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1508,37 +1717,47 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." 
} } }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." + }, "resources": { "$ref": "#/$defs/Types.KubeResources" } - } + }, + "required": [ + "tsdbStartDate" + ] }, "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1546,41 +1765,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1597,7 +1816,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -1607,7 +1826,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1653,15 +1872,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1694,15 +1913,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1753,10 +1972,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1764,31 +1984,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1799,11 +2020,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1811,11 +2033,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." 
}, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1827,6 +2049,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1837,7 +2060,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -1853,10 +2076,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -1864,31 +2088,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1899,11 +2124,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1911,11 +2137,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1927,9 +2153,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -1948,6 +2175,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1959,7 +2187,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -2005,6 +2233,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2020,11 +2249,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2038,25 +2267,26 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", "enum": [ - "audit", - "enforce" + "Audit", + "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2070,6 +2300,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2080,7 +2311,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2107,6 +2338,58 @@ "type": "object", "additionalProperties": false, "properties": { + "schedules": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's backup schedules.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." + }, + "definitions": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero schedules.", + "properties": { + "manifests": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } + }, + "full": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + } + } + } + } + } + } + }, "eks": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks" }, @@ -2124,12 +2407,12 @@ "properties": { "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." }, "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", "maxLength": 49, - "description": "The name of the velero bucket" + "description": "The name of the bucket for Velero." 
} }, "required": [ @@ -2140,6 +2423,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -2149,7 +2433,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2228,10 +2512,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -2241,10 +2526,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -2257,11 +2543,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -2280,7 +2566,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2293,14 +2579,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -2314,14 +2601,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. 
Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -2600,11 +2888,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2614,11 +2902,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the opensearch pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." } } } @@ -2626,11 +2914,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2640,7 +2929,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2656,7 +2945,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2666,7 +2955,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2676,7 +2965,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2686,7 +2975,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the load balancer controller module."
}, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -2699,15 +2988,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } } diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index d8e8377a8..c2d0302b7 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "KFD modules deployed on top of an existing Kubernetes cluster.", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,6 +49,7 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "distribution": { @@ -68,7 +70,7 @@ "properties": { "kubeconfig": { "type": "string", - "description": "The kubeconfig file path" + "description": "The path to the kubeconfig file." }, "common": { "$ref": "#/$defs/Spec.Distribution.Common" @@ -134,28 +136,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + "description": "URL of the registry where to pull images from for the Distribution phase. 
(Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -165,7 +168,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." } }, "required": [ @@ -217,14 +220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, as cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "forecastle": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" @@ -258,20 +262,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -304,7 +309,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources; infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -328,7 +333,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`."
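A minimal sketch of how the `baseDomain`, `nginx.type`, and `tls.provider` fields above fit together in a `furyctl.yaml` configuration (the domain is hypothetical):

```yaml
spec:
  distribution:
    modules:
      ingress:
        baseDomain: internal.example.dev # hypothetical domain
        nginx:
          type: dual
          tls:
            provider: certManager
```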
}, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -353,16 +358,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -374,6 +382,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -389,26 +398,27 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", "enum": [ "http01" ], - "description": "The type of the cluster issuer, must be ***http01***" + "description": "The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations." }, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -431,6 +441,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -443,7 +454,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. 
Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage; you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -482,6 +493,20 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, { "if": { "properties": { @@ -508,14 +533,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replica deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -527,6 +552,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -537,10 +563,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -548,11 +575,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -563,10 +590,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -575,37 +604,47 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS."
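To show how the Loki backend options above are meant to be used, a hedged `furyctl.yaml` sketch pointing Loki at an external S3-compatible endpoint (all values are hypothetical; `tsdbStartDate`, defined just below, is required by the schema):

```yaml
spec:
  distribution:
    modules:
      logging:
        type: loki
        loki:
          backend: externalEndpoint
          externalEndpoint:
            endpoint: s3.example.dev        # hypothetical endpoint
            insecure: false
            bucketName: loki-logs           # hypothetical bucket
            accessKeyId: example-key        # hypothetical credentials
            secretAccessKey: example-secret # hypothetical credentials
          tsdbStartDate: "2024-11-18"       # example date from the description
```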
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." + }, "resources": { "$ref": "#/$defs/Types.KubeResources" } - } + }, + "required": [ + "tsdbStartDate" + ] }, "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -613,41 +652,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -664,7 +703,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -674,7 +713,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when you have a centralized (remote) Prometheus where to ship the metrics instead of storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -720,15 +759,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the K8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -761,15 +800,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL where to send the dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to `false` to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -820,10 +859,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -831,31 +871,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -866,11 +907,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -878,11 +920,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user."
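Putting the Mimir and MinIO fields above together, a hedged `furyctl.yaml` sketch for longer metrics retention backed by the in-cluster MinIO (sizes and credentials are hypothetical):

```yaml
spec:
  distribution:
    modules:
      monitoring:
        type: mimir
        mimir:
          retentionTime: 30d         # default from the description
          backend: minio
        minio:
          storageSize: 20Gi          # hypothetical size
          rootUser:
            username: minio-admin    # hypothetical credentials
            password: example-secret # hypothetical credentials
```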
}, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -894,6 +936,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -904,7 +947,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -920,10 +963,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -931,31 +975,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -966,11 +1011,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -978,11 +1024,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -994,6 +1040,7 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1011,7 +1058,7 @@ "calico", "cilium" ], - "description": "The type of networking to use, either ***none***, ***calico*** or ***cilium***" + "description": "The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`." } }, "required": [ @@ -1048,10 +1095,12 @@ "additionalProperties": false, "properties": { "podCidr": { - "$ref": "#/$defs/Types.Cidr" + "$ref": "#/$defs/Types.Cidr", + "description": "Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set, the default is to use `.spec.kubernetes.podCidr`." }, "maskSize": { - "type": "string" + "type": "string", + "description": "The mask size to use for the Pods network on each node." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1065,6 +1114,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1076,7 +1126,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1122,6 +1172,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -1137,11 +1188,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests, and `dryrun` won't give any feedback to the user but will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraint templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1155,25 +1206,26 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them."
}, "validationFailureAction": { "type": "string", "enum": [ - "audit", - "enforce" + "Audit", + "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1187,6 +1239,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1197,7 +1250,7 @@ "none", "on-premises" ], - "description": "The type of the DR, must be ***none*** or ***on-premises***" + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -1223,42 +1276,103 @@ "Spec.Distribution.Modules.Dr.Velero": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Velero package.", "properties": { - "retentionTime": { - "type": "string", - "description": "The retention time for velero" - }, "backend": { "type": "string", "enum": [ "minio", "externalEndpoint" ], - "description": "The backend for velero" + "description": "The storage backend type for Velero. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Velero's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint for velero" + "description": "External S3-compatible endpoint for Velero's storage." }, "insecure": { "type": "boolean", - "description": "If true, the endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key for velero backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id for velero backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name for velero backend" + "description": "The bucket name of the external S3-compatible object storage." + } + } + }, + "schedules": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's backup schedules.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." 
+ }, + "definitions": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero schedules.", + "properties": { + "manifests": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } + }, + "full": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + } + } + } + } + } + } + }, + "snapshotController": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the additional snapshotController component installation.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in." } } }, @@ -1270,6 +1384,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -1279,7 +1394,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -1358,10 +1473,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." 
}, "tolerations": { "type": [ @@ -1371,10 +1487,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -1387,11 +1504,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -1410,7 +1527,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -1423,14 +1540,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -1444,14 +1562,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. 
Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -1515,11 +1634,29 @@ } }, "Types.KubeTaints": { - "type": "array", - "items": { - "type": "string", - "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=(\\w+):(NoSchedule|PreferNoSchedule|NoExecute)$" - } + "type": "object", + "additionalProperties": false, + "properties": { + "effect": { + "type": "string", + "enum": [ + "NoSchedule", + "PreferNoSchedule", + "NoExecute" + ] + }, + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "effect", + "key", + "value" + ] }, "Types.KubeNodeSelector": { "type": [ @@ -1585,11 +1722,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -1599,11 +1736,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." } } } @@ -1611,11 +1748,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the security module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -1625,7 +1763,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -1641,7 +1779,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -1651,7 +1789,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -1661,15 +1799,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." 
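The reworked `Types.KubeTaints` definition above replaces the old `key=value:Effect` strings with structured objects; wherever that type is referenced, a taints list would now look like this sketch (key and value are illustrative):

```yaml
taints:
  - effect: NoSchedule
    key: node.kubernetes.io/role
    value: infra
```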
}, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } } diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index 84446d1f0..e49d59cf0 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "A KFD Cluster deployed on top of a set of existing VMs.", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,6 +49,7 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "kubernetes": { @@ -68,29 +70,30 @@ "Spec.Kubernetes": { "type": "object", "additionalProperties": false, + "description": "Defines the Kubernetes components configuration and the values needed for the kubernetes phase of furyctl.", "properties": { "pkiFolder": { "type": "string", - "description": "The folder where the PKI will be stored" + "description": "The path to the folder where the PKI files for Kubernetes and etcd are stored." }, "ssh": { "$ref": "#/$defs/Spec.Kubernetes.SSH" }, "dnsZone": { "type": "string", - "description": "The DNS zone to use for the cluster" + "description": "The DNS zone of the machines. It will be appended to the name of each host to generate the `kubernetes_hostname` in the Ansible inventory file. It is also used to calculate etcd's initial cluster value." }, "controlPlaneAddress": { "type": "string", - "description": "The address of the control plane" + "description": "The address for the Kubernetes control plane. Usually a DNS entry pointing to a Load Balancer on port 6443." }, "podCidr": { "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR to use for the pods" + "description": "The subnet CIDR to use for the Pods network." }, "svcCidr": { "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR to use for the services" + "description": "The subnet CIDR to use for the Services network." }, "proxy": { "$ref": "#/$defs/Spec.Kubernetes.Proxy" @@ -126,14 +129,15 @@ "Spec.Kubernetes.SSH": { "type": "object", "additionalProperties": false, + "description": "SSH credentials to access the hosts", "properties": { "username": { "type": "string", - "description": "The username to use to connect to the nodes" + "description": "The username to use to connect to the hosts" }, "keyPath": { "type": "string", - "description": "The path to the private key to use to connect to the nodes" + "description": "The path to the private key to use to connect to the hosts" } }, "required": [ @@ -147,15 +151,15 @@ "properties": { "http": { "$ref": "#/$defs/Types.Uri", - "description": "The HTTP proxy to use" + "description": "The HTTP proxy URL. Example: http://test.example.dev:3128" }, "https": { "$ref": "#/$defs/Types.Uri", - "description": "The HTTPS proxy to use" + "description": "The HTTPS proxy URL. 
Example: https://test.example.dev:3128" }, "noProxy": { "type": "string", - "description": "The no proxy to use" + "description": "Comma-separated list of hosts that should not use the HTTP(S) proxy. Example:\nlocalhost,127.0.0.1,172.16.0.0/17,172.16.128.0/17,10.0.0.0/8,.example.dev" } }, "required": [] }, @@ -166,7 +170,7 @@ "properties": { "enabled": { "type": "boolean", - "description": "If true, the load balancers will be enabled" + "description": "Set to true to install HAProxy and configure it as a load balancer on the load balancer hosts." }, "hosts": { "type": "array", @@ -182,7 +186,7 @@ }, "additionalConfig": { "type": "string", - "description": "The additional config to use" + "description": "Additional configuration to append to HAProxy's configuration file." } }, "required": [ @@ -209,11 +213,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the host" + "description": "A name to identify the host. This value will be concatenated to `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as `<name>.<dnsZone>`." }, "ip": { "type": "string", - "description": "The IP of the host" + "description": "The IP address of the host." } }, "required": [ @@ -227,23 +231,23 @@ "properties": { "enabled": { "type": "boolean", - "description": "If true, keepalived will be enabled" + "description": "Set to `true` to install keepalived with a floating virtual IP shared between the load balancer hosts for a deployment in High Availability." }, "interface": { "type": "string", - "description": "The interface to use" + "description": "Name of the network interface where to bind the Keepalived virtual IP." }, "ip": { "type": "string", - "description": "The IP to use" + "description": "The virtual floating IP for Keepalived." }, "virtualRouterId": { "type": "string", - "description": "The virtual router ID to use" + "description": "The virtual router ID of Keepalived. It must be different from other Keepalived instances in the same network." }, "passphrase": { "type": "string", - "description": "The passphrase to use" + "description": "The passphrase for the Keepalived clustering." } }, "required": [ @@ -268,14 +272,15 @@ "Spec.Kubernetes.LoadBalancers.Stats": { "type": "object", "additionalProperties": false, + "description": "Configuration for HAProxy's stats page. Accessible at `http://<host>:1936/stats`.", "properties": { "username": { "type": "string", - "description": "The username to use" + "description": "The basic-auth username for HAProxy's stats page." }, "password": { "type": "string", - "description": "The password to use" + "description": "The basic-auth password for HAProxy's stats page." } }, "required": [ @@ -285,6 +290,7 @@ }, "Spec.Kubernetes.Masters": { "type": "object", + "description": "Configuration for the control plane hosts.", "additionalProperties": false, "properties": { "hosts": { @@ -304,11 +310,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the host" + "description": "A name to identify the host. This value will be concatenated to `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as `<name>.<dnsZone>`."
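Putting the load balancer fields above together, a minimal sketch of an HA load balancer configuration (all names, IPs, and secrets are placeholders):

```yaml
spec:
  kubernetes:
    dnsZone: example.dev
    loadBalancers:
      enabled: true
      hosts:
        - name: haproxy1 # FQDN will be haproxy1.example.dev
          ip: 192.168.1.10
        - name: haproxy2
          ip: 192.168.1.11
      keepalived:
        enabled: true
        interface: eth0
        ip: 192.168.1.100/24 # the floating virtual IP
        virtualRouterId: "201"
        passphrase: "keepalivedpass"
      stats:
        username: admin
        password: password
```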
}, "ip": { "type": "string", - "description": "The IP of the host" + "description": "The IP address of the host" } }, "required": [ @@ -318,7 +324,8 @@ }, "Spec.Kubernetes.Nodes": { "type": "array", - "minItems": 1, + "description": "Configuration for the node hosts", + "minItems": 0, "items": { "$ref": "#/$defs/Spec.Kubernetes.Nodes.Node" } @@ -328,7 +335,8 @@ "additionalProperties": false, "properties": { "name": { - "type": "string" + "type": "string", + "description": "Name for the node group. It will be also used as the node role label. It should follow the [valid variable names guideline](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#valid-variable-names) from Ansible." }, "hosts": { "type": "array", @@ -357,9 +365,11 @@ "additionalProperties": false, "properties": { "name": { + "description": "A name to identify the host. This value will be concatenated to `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as `.`.", "type": "string" }, "ip": { + "description": "The IP address of the host", "type": "string" } }, @@ -374,11 +384,11 @@ "properties": { "pythonInterpreter": { "type": "string", - "description": "The python interpreter to use" + "description": "The Python interpreter to use for running Ansible. Example: python3" }, "config": { "type": "string", - "description": "Additional config to append to the ansible.cfg file" + "description": "Additional configuration to append to the ansible.cfg file" } } }, @@ -416,11 +426,11 @@ "properties": { "provider": { "type": "string", - "description": "The cloud provider to use" + "description": "Sets the cloud provider for the Kubelet" }, "config": { "type": "string", - "description": "The cloud config to use" + "description": "Sets cloud config for the Kubelet" } } }, @@ -433,47 +443,53 @@ "items": { "type": "string" }, - "description": "The names of the users" + "description": "List of user names to create and get a kubeconfig file. Users will not have any permissions by default, RBAC setup for the new users is needed." }, "org": { "type": "string", - "description": "The org of the users" + "description": "The organization the users belong to." } } }, "Spec.Kubernetes.Advanced.OIDC": { "type": "object", "additionalProperties": false, + "description": "OIDC configuration for the Kubernetes API server.", "properties": { "issuer_url": { "type": "string", - "description": "The issuer url of the oidc provider" + "description": "The issuer URL of the OIDC provider." }, "client_id": { "type": "string", - "description": "The client id of the oidc provider" + "description": "The client ID the API server will use to authenticate to the OIDC provider." }, "ca_file": { "type": "string", - "description": "The ca file of the oidc provider" + "description": "The path to the certificate for the CA that signed the identity provider's web certificate. Defaults to the host's root CAs. This should be a path available to the API Server." }, "username_claim": { - "type": "string" + "type": "string", + "description": "JWT claim to use as the user name. The default value is `sub`, which is expected to be a unique identifier of the end user." }, "username_prefix": { - "type": "string" + "type": "string", + "description": "Prefix prepended to username claims to prevent clashes with existing names (such as system: users)." }, "groups_claim": { - "type": "string" + "type": "string", + "description": "JWT claim to use as the user's group." 
}, "group_prefix": { - "type": "string" + "type": "string", + "description": "Prefix prepended to group claims to prevent clashes with existing names (such as system: groups)." } } }, "Spec.Kubernetes.Advanced.Containerd": { "type": "object", "additionalProperties": false, + "description": "Advanced configuration for containerd", "properties": { "registryConfigs": { "$ref": "#/$defs/Spec.Kubernetes.Advanced.Containerd.RegistryConfigs" @@ -481,7 +497,7 @@ } }, "Spec.Kubernetes.Advanced.Containerd.RegistryConfigs": { - "description": "Allows specifying custom configuration for a registry at containerd level. You can set authentication details and mirrors for a registry.\nThis feature can be used for example to authenticate to a private registry at containerd (container runtime) level, i.e. globally instead of using `imagePullSecrets`. It also can be used to use a mirror for a registry or to enable insecure connections to trusted registries that don't support TLS.", + "description": "Allows specifying custom configuration for a registry at containerd level. You can set authentication details and mirrors for a registry.\nThis feature can be used for example to authenticate to a private registry at containerd (container runtime) level, i.e. globally instead of using `imagePullSecrets`. It also can be used to use a mirror for a registry or to enable insecure connections to trusted registries that have self-signed certificates.", "type": "array", "additionalProperties": false, "items": { @@ -489,19 +505,24 @@ "additionalProperties": false, "properties": { "registry": { - "type": "string" + "type": "string", + "description": "Registry address on which you would like to configure authentication or mirror(s). Example: `myregistry.tld:5000`" }, "username": { - "type": "string" + "type": "string", + "description": "The username containerd will use to authenticate against the registry." }, "password": { - "type": "string" + "type": "string", + "description": "The password containerd will use to authenticate against the registry." }, "insecureSkipVerify": { - "type": "boolean" + "type": "boolean", + "description": "Set to `true` to skip TLS verification (e.g. when using self-signed certificates)." }, "mirrorEndpoint": { "type": "array", + "description": "Array of URLs with the mirrors to use for the registry. Example: `[\"http://mymirror.tld:8080\"]`", "items": { "type": "string" } @@ -518,33 +539,34 @@ "items": { "type": "string" }, - "description": "The tls cipher suites to use" + "description": "The TLS cipher suites to use for etcd, kubelet, and kubeadm static pods. Example:\n```yaml\ntlsCipherSuites:\n - \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\"\n - \"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\"\n - \"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\"\n - \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\"\n - \"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\"\n - \"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\"\n - \"TLS_AES_128_GCM_SHA256\"\n - \"TLS_AES_256_GCM_SHA384\"\n - \"TLS_CHACHA20_POLY1305_SHA256\"\n```" }, "configuration": { "type": "string", - "description": "The configuration to use" + "description": "etcd's encryption at rest configuration. Must be a string with the EncryptionConfiguration object in YAML. 
Example:\n\n```yaml\n\napiVersion: apiserver.config.k8s.io/v1\nkind: EncryptionConfiguration\nresources:\n - resources:\n - secrets\n providers:\n - aescbc:\n keys:\n - name: mykey\n secret: base64_encoded_secret\n```\n" } } }, "Spec.Kubernetes.Advanced.AirGap": { "type": "object", + "description": "Advanced configuration for air-gapped installations. Allows setting custom URLs where to download the binary dependencies from and custom .deb and .rpm package repositories.", "additionalProperties": false, "properties": { "containerdDownloadUrl": { "type": "string", - "description": "The containerd download url" + "description": "URL where to download the `.tar.gz` with containerd from. The `tar.gz` should be the same as the one downloaded from containerd's GitHub releases page." }, "runcDownloadUrl": { "type": "string", - "description": "The runc download url" + "description": "URL where to download the runc binary from." }, "runcChecksum": { "type": "string", - "description": "The runc checksum" + "description": "Checksum for the runc binary." }, "etcdDownloadUrl": { "type": "string", - "description": "The etcd download url" + "description": "URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded from `<etcdDownloadUrl>/<version>/etcd-<version>-linux-<arch>.tar.gz`" }, "dependenciesOverride": { "type": "object", @@ -556,19 +578,19 @@ "properties": { "name": { "type": "string", - "description": "The name of the apt dependency" + "description": "An indicative name for the Apt repository. Example: `k8s-1.29`" }, "repo": { "type": "string", - "description": "The repo of the apt dependency" + "description": "A source string for the new Apt repository. Example: `deb https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /`" }, "gpg_key": { "type": "string", - "description": "The gpg key of the apt dependency" + "description": "URL where to download the GPG key of the Apt repository. Example: `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key`" }, "gpg_key_id": { "type": "string", - "description": "The gpg key id of the apt dependency" + "description": "The GPG key ID of the Apt repository. Example: `36A1D7869245C8950F966E92D8576A8BA88D21E9`" } }, "required": [ @@ -584,23 +606,23 @@ "properties": { "name": { "type": "string", - "description": "The name of the yum dependency" + "description": "An indicative name for the Yum repository. Example: `k8s-1.29`" }, "repo": { "type": "string", - "description": "The repo of the yum dependency" + "description": "URL to the directory where the Yum repository's `repodata` directory lives. Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/`" }, "gpg_key": { "type": "string", - "description": "The gpg key of the yum dependency" + "description": "URL where to download the ASCII-armored GPG key of the Yum repository. Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key`" }, "gpg_key_check": { "type": "boolean", - "description": "If true, the gpg key check will be enabled" + "description": "If true, the GPG signature check on the packages will be enabled." }, "repo_gpg_check": { "type": "boolean", - "description": "If true, the repo gpg check will be enabled" + "description": "If true, the GPG signature check on the `repodata` will be enabled."
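Tying the containerd and air-gap fields above together, a hypothetical `.spec.kubernetes.advanced` snippet (all URLs, credentials, and checksums are placeholders):

```yaml
spec:
  kubernetes:
    advanced:
      containerd:
        registryConfigs:
          - registry: myregistry.tld:5000
            username: admin
            password: registrypassword
            mirrorEndpoint:
              - http://mymirror.tld:8080
      airGap:
        containerdDownloadUrl: https://mirror.internal/containerd-1.7.23-linux-amd64.tar.gz
        runcDownloadUrl: https://mirror.internal/runc.amd64
        runcChecksum: "<runc-sha256-checksum>"
        etcdDownloadUrl: https://mirror.internal/etcd
        dependenciesOverride:
          apt:
            - name: k8s-1.29
              repo: deb https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /
              gpg_key: https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key
              gpg_key_id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
          yum:
            - name: k8s-1.29
              repo: https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
              gpg_key: https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
              gpg_key_check: true
              repo_gpg_check: true
```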
} }, "required": [ @@ -682,28 +704,33 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory; it does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury)." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + }, + "networkPoliciesEnabled": { + "type": "boolean", + "description": "EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided for core modules." } } }, @@ -713,7 +740,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." } }, "required": [ @@ -765,14 +792,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`; cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses."
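A sketch of the common distribution settings described above, including the new experimental `networkPoliciesEnabled` flag (values are illustrative):

```yaml
spec:
  distribution:
    common:
      nodeSelector:
        node.kubernetes.io/role: infra
      tolerations:
        - effect: NoSchedule
          key: node.kubernetes.io/role
          value: infra
      registry: registry.sighup.io/fury
      networkPoliciesEnabled: false
```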
}, "forecastle": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" @@ -806,20 +834,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -852,7 +881,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -876,7 +905,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -901,16 +930,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -922,6 +954,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. 
Required even if `ingress.nginx.type` is `none`; cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -937,26 +970,27 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", "enum": [ "http01" ], - "description": "The type of the cluster issuer, must be ***http01***" + "description": "The type of the clusterIssuer. Only `http01` challenge is supported for on-premises clusters. See solvers for arbitrary configurations." }, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -979,6 +1013,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" }, "type": { "type": "string", "enum": [ "none", "opensearch", "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage; you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1030,6 +1065,20 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, { "if": { "properties": { @@ -1056,14 +1105,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment."
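A sketch combining the `dual` nginx type with the TLS `secret` provider and the `{file://<path>}` notation described above (domain and file paths are placeholders):

```yaml
spec:
  distribution:
    modules:
      ingress:
        baseDomain: internal.example.dev
        nginx:
          type: dual
          tls:
            provider: secret
            secret:
              cert: "{file://secrets/tls.crt}"
              key: "{file://secrets/tls.key}"
              ca: "{file://secrets/ca.crt}"
```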
}, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1075,6 +1124,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1085,10 +1135,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1096,11 +1147,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1111,10 +1162,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1123,37 +1176,47 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." 
+ }, "resources": { "$ref": "#/$defs/Types.KubeResources" } - } + }, + "required": [ + "tsdbStartDate" + ] }, "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1161,41 +1224,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1212,7 +1275,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -1222,7 +1285,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
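As a sketch of the `customOutputs` contract described above, the following discards every pre-defined flow with the `nullout` output (in a real setup you would point the flows you care about at your own storage instead):

```yaml
spec:
  distribution:
    modules:
      logging:
        type: customOutputs
        customOutputs:
          audit: "nullout: {}"
          events: "nullout: {}"
          infra: "nullout: {}"
          ingressNginx: "nullout: {}"
          kubernetes: "nullout: {}"
          systemdCommon: "nullout: {}"
          systemdEtcd: "nullout: {}"
          errors: "nullout: {}"
```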
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1268,15 +1331,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1309,15 +1372,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1368,10 +1431,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1379,31 +1443,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1414,11 +1479,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1426,11 +1492,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1442,6 +1508,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1452,7 +1519,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -1468,10 +1535,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -1479,31 +1547,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." 
}, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1514,11 +1583,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1526,11 +1596,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1542,6 +1612,7 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1558,7 +1629,7 @@ "calico", "cilium" ], - "description": "The type of networking to use, either ***calico*** or ***cilium***" + "description": "The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`." } }, "required": [ @@ -1580,11 +1651,11 @@ "properties": { "podCidr": { "$ref": "#/$defs/Types.Cidr", - "description": "The pod cidr to use for the cilium pods" + "description": "Allows specifing a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`." }, "maskSize": { "type": "string", - "description": "The mask size to use for the cilium pods" + "description": "The mask size to use for the Pods network on each node." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1594,6 +1665,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1605,7 +1677,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." 
}, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1651,6 +1723,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -1666,11 +1739,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1684,25 +1757,26 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", "enum": [ - "audit", - "enforce" + "Audit", + "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1716,6 +1790,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1726,7 +1801,7 @@ "none", "on-premises" ], - "description": "The type of the DR, must be ***none*** or ***on-premises***" + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -1752,42 +1827,103 @@ "Spec.Distribution.Modules.Dr.Velero": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Velero package.", "properties": { - "retentionTime": { - "type": "string", - "description": "The retention time for velero" - }, "backend": { "type": "string", "enum": [ "minio", "externalEndpoint" ], - "description": "The backend for velero" + "description": "The storage backend type for Velero. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Velero's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint for velero" + "description": "External S3-compatible endpoint for Velero's storage." }, "insecure": { "type": "boolean", - "description": "If true, the endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key for velero backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id for velero backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name for velero backend" + "description": "The bucket name of the external S3-compatible object storage." + } + } + }, + "schedules": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's backup schedules.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether or not to install the default `manifests` and `full` backup schedules. Default is `true`." + }, + "definitions": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero schedules.", + "properties": { + "manifests": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL." + } + } + }, + "full": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's full backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups; prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "EXPERIMENTAL (if you take more than one backup, backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation."
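Putting the new Velero schedule fields together, a hypothetical DR configuration overriding the default cron expressions (values are illustrative):

```yaml
spec:
  distribution:
    modules:
      dr:
        type: on-premises
        velero:
          backend: minio
          schedules:
            install: true
            definitions:
              manifests:
                schedule: "*/30 * * * *"
                ttl: "720h0m0s"
              full:
                schedule: "0 1 * * *"
                ttl: "720h0m0s"
                snapshotMoveData: false
```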
+ } + } + } + } + } + }, + "snapshotController": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the additional snapshotController component installation.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether or not to install the snapshotController component in the cluster. Before enabling this field, check that your CSI driver does not already include a built-in snapshotController." } } }, @@ -1799,6 +1935,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -1808,7 +1945,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -1903,10 +2040,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -1916,12 +2054,19 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { + "additionalProperties": false, "type": "object", - "additionalProperties": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" + "description": "Override the definition of the Auth module ingresses.", + "properties": { + "gangplank": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" + }, + "dex": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" + } } } } @@ -1932,11 +2077,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -1955,7 +2100,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`."
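A sketch of the now-explicit Auth ingress overrides (`gangplank` and `dex` are the only keys accepted after this change; the hosts are placeholders):

```yaml
spec:
  distribution:
    modules:
      auth:
        provider:
          type: sso
        baseDomain: example.dev
        overrides:
          ingresses:
            gangplank:
              host: gangplank.example.dev
              ingressClass: external
            dex:
              host: dex.example.dev
              ingressClass: external
```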
}, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -1968,14 +2113,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -1989,14 +2135,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -2026,42 +2173,42 @@ "properties": { "enabled": { "type": "boolean", - "description": "If true, oidc kubernetes auth will be enabled" + "description": "If true, components needed for interacting with the Kubernetes API with OIDC authentication (Gangplank, Dex) be deployed and configued." }, "clientID": { "type": "string", - "description": "The client ID for oidc kubernetes auth" + "description": "The client ID that the Kubernetes API will use to authenticate against the OIDC provider (Dex)." }, "clientSecret": { "type": "string", - "description": "The client secret for oidc kubernetes auth" + "description": "The client secret that the Kubernetes API will use to authenticate against the OIDC provider (Dex)." }, "scopes": { "type": "array", "items": { "type": "string" }, - "description": "The scopes for oidc kubernetes auth" + "description": "Used to specify the scope of the requested Oauth authorization by Gangplank. Defaults to: `[\"openid\", \"profile\", \"email\", \"offline_access\", \"groups\"]`" }, "usernameClaim": { "type": "string", - "description": "The username claim for oidc kubernetes auth" + "description": "The JWT claim to use as the username. This is used in Gangplank's UI. This is combined with the clusterName for the user portion of the kubeconfig. Defaults to `nickname`." }, "emailClaim": { "type": "string", - "description": "The email claim for oidc kubernetes auth" + "description": "DEPRECATED. Defaults to `email`." }, "sessionSecurityKey": { "type": "string", - "description": "The session security key for oidc kubernetes auth" + "description": "The Key to use for the sessions in Gangplank. Must be different between different instances of Gangplank." 
}, "removeCAFromKubeconfig": { "type": "boolean", - "description": "Set to true to remove the CA from the kubeconfig file" + "description": "Set to true to remove the CA from the kubeconfig file generated by Gangplank." }, "namespace": { "type": "string", - "description": "The namespace to set in the context of the kubeconfig file" + "description": "The namespace to set in the context of the kubeconfig file generated by Gangplank. Defaults to `default`." } }, "required": [ @@ -2210,11 +2357,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the loki pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the prometheus pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2224,11 +2371,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the prometheus pods" + "description": "The memory limit for the Pod. Example: `1G`." } } } @@ -2236,11 +2383,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the tracing module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2250,7 +2398,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the policy module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2266,7 +2414,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2276,7 +2424,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the minio module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2286,15 +2434,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." 
} } } diff --git a/schemas/public/spec-plugins.json b/schemas/public/spec-plugins.json index df217922a..258cbbf30 100644 --- a/schemas/public/spec-plugins.json +++ b/schemas/public/spec-plugins.json @@ -87,6 +87,10 @@ "type": "string" }, "description": "The values of the release" + }, + "disableValidationOnInstall": { + "type": "boolean", + "description": "Disable running `helm diff` validation when installing the plugin; it will still be run when upgrading." } }, "required": ["name", "namespace", "chart"] diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index 157f9ff37..3dd175a5d 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -85,6 +85,8 @@ spec: nodeAllowedSshPublicKey: "ssh-ed25519 XYZ" # Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing clusters you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as an interim step. nodePoolsLaunchKind: "launch_templates" + # Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool. Valid values are: `alinux2`, `alinux2023` + nodePoolGlobalAmiType: "alinux2" # Optional Kubernetes Cluster log retention in days. Defaults to 90 days. # logRetentionDays: 90 # This map defines the access to the Kubernetes API server @@ -97,6 +99,7 @@ nodePools: # This is the name of the nodepool - name: infra + type: self-managed # This map defines the max and min number of nodes in the nodepool autoscaling group size: min: 1 @@ -124,8 +127,8 @@ - node.kubernetes.io/role=infra:NoSchedule # AWS tags that will be added to the ASG and EC2 instances, the example shows the labels needed by cluster autoscaler tags: - k8s.io/cluster-autoscaler/node-template/label/nodepool: "worker" - k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/role: "worker" + k8s.io/cluster-autoscaler/node-template/label/nodepool: "infra" + k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/role: "infra" # Optional additional firewall rules that will be attached to the nodes #additionalFirewallRules: # # The name of the rule @@ -143,7 +146,7 @@ # to: 80 # # Additional AWS tags # tags: {} - # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more informations + # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more information. awsAuth: {} # additionalAccounts: # - "777777777777" @@ -209,7 +212,7 @@ # - http01: # ingress: # class: nginx - # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission + # DNS definition, used in conjunction with the externalDNS package to automate DNS management and certificate emission. dns: # the public DNS zone definition public: @@ -227,6 +230,9 @@ logging: # can be opensearch, loki, customOutput or none.
With none, the logging module won't be installed type: loki + # configurations for the loki package + loki: + tsdbStartDate: "2024-11-20" # configurations for the minio-ha package minio: # the PVC size for each minio disk, 6 disks total diff --git a/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl b/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl index f6af9d6b1..e2e795330 100644 --- a/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl +++ b/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl @@ -76,6 +76,9 @@ spec: logging: # can be opensearch, loki, customOutput or none. With none, the logging module won't be installed type: loki + # configurations for the loki package + loki: + tsdbStartDate: "2024-11-20" # configurations for the minio-ha package minio: # the PVC size for each minio disk, 6 disks total diff --git a/templates/config/onpremises-kfd-v1alpha2.yaml.tpl b/templates/config/onpremises-kfd-v1alpha2.yaml.tpl index bbfdc9727..7ea97c12a 100644 --- a/templates/config/onpremises-kfd-v1alpha2.yaml.tpl +++ b/templates/config/onpremises-kfd-v1alpha2.yaml.tpl @@ -13,19 +13,20 @@ apiVersion: kfd.sighup.io/v1alpha2 kind: OnPremises metadata: - # The name of the cluster, will be also used as a prefix for all the other resources created + # The name of the cluster. It will also be used as a prefix for all the other resources created. name: {{.Name}} spec: - # This value defines which KFD version will be installed and in consequence the Kubernetes version to use to create the cluster, - # it supports git tags and branches + # Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.29.4. distributionVersion: {{.DistributionVersion}} - # This section describes how the cluster will be created + # Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl. kubernetes: + # The path to the folder where the PKI files for Kubernetes and etcd are stored. pkiFolder: ./pki + # SSH credentials to access the hosts ssh: username: johndoe keyPath: /youruserpath/.ssh/id_ed25519 - # this zone will be concatenated to the - name on each host to generate kubernetes_hostname in the hosts.yaml file, and also for the etcd initial cluster value + # The DNS zone of the machines. It will be appended to the name of each host to generate the `kubernetes_hostname` in the Ansible inventory file. It is also used to calculate etcd's initial cluster value. dnsZone: example.dev controlPlaneAddress: control-planelocal.example.dev:6443 podCidr: 172.16.128.0/17 @@ -120,7 +121,7 @@ spec: type: "calico" # This section contains all the configurations for the ingress module ingress: - # the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone + # The base domain used for all the KFD infrastructural ingresses. 
If using the nginx dual type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` baseDomain: internal.example.dev # configurations for the nginx ingress controller package nginx: @@ -138,7 +139,7 @@ # key: "{file://relative/path/to/ssl.key}" # the ca file, a file notation can be used to get the content from a file # ca: "{file://relative/path/to/ssl.ca}" - # configuration for the cert-manager package + # configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`: cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. certManager: # the configuration for the clusterIssuer that will be created clusterIssuer: @@ -152,6 +153,9 @@ logging: # can be opensearch, loki, customOutput or none. With none, the logging module won't be installed type: loki + # configurations for the loki package + loki: + tsdbStartDate: "2024-11-20" # configurations for the minio-ha package minio: # the PVC size for each minio disk, 6 disks total @@ -190,7 +194,7 @@ provider: # The authentication type used for the infrastructure ingresses (all the ingress for the distribution) can be none, basicAuth, sso type: none - # The base domain used for all the auth ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.public.name zone + # The base domain used for all the auth ingresses. If using the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.public.name domain. baseDomain: example.dev # Custom Patches to add or override fields in the generated manifests #customPatches: {} diff --git a/templates/distribution/_helpers.tpl b/templates/distribution/_helpers.tpl index 335146356..6f5e2a26a 100644 --- a/templates/distribution/_helpers.tpl +++ b/templates/distribution/_helpers.tpl @@ -42,7 +42,11 @@ $moduleNodeSelector (index .spec.distribution.common "nodeSelector") -}} + {{- if and (not $nodeSelector) (index . "returnEmptyInsteadOfNull") .returnEmptyInsteadOfNull -}} + {{- "{}" | indent $indent | trim -}} + {{- else -}} {{- $nodeSelector | toYaml | indent $indent | trim -}} + {{- end -}} {{- end -}} {{- define "tolerations" -}} @@ -70,7 +74,11 @@ $moduleTolerations (index .spec.distribution.common "tolerations") -}} + {{- if and (not $tolerations) (index . "returnEmptyInsteadOfNull") .returnEmptyInsteadOfNull -}} + {{- "[]" | indent $indent | trim -}} + {{- else -}} {{- $tolerations | toYaml | indent $indent | trim -}} + {{- end -}} {{- end -}} {{ define "globalIngressClass" }} @@ -125,7 +133,7 @@ - hosts: - {{ template "ingressHost" . }} {{- if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" }} - secretName: {{ lower .package }}-tls + secretName: {{ lower .prefix | trimSuffix "."
}}-tls {{- end }} {{- end }} {{- end -}} diff --git a/templates/distribution/manifests/auth/kustomization.yaml.tpl b/templates/distribution/manifests/auth/kustomization.yaml.tpl index 5ab2396a8..a1e43808c 100644 --- a/templates/distribution/manifests/auth/kustomization.yaml.tpl +++ b/templates/distribution/manifests/auth/kustomization.yaml.tpl @@ -17,6 +17,10 @@ resources: - resources/ingress-infra.yml {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml - patches/pomerium-ingress.yml diff --git a/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl new file mode 100644 index 000000000..0f7a8a246 --- /dev/null +++ b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl @@ -0,0 +1,35 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: acme-httpsolver-ingress-nginx + namespace: pomerium + labels: + cluster.kfd.sighup.io/module: auth + cluster.kfd.sighup.io/auth-provider-type: sso +spec: + podSelector: + matchLabels: + app: cert-manager + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ports: + - port: 8089 + protocol: TCP +--- diff --git a/templates/distribution/manifests/auth/policies/common.yaml.tpl b/templates/distribution/manifests/auth/policies/common.yaml.tpl new file mode 100644 index 000000000..1b8300e14 --- /dev/null +++ b/templates/distribution/manifests/auth/policies/common.yaml.tpl @@ -0,0 +1,44 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: pomerium + labels: + cluster.kfd.sighup.io/module: auth + cluster.kfd.sighup.io/auth-provider-type: sso +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-kube-dns + namespace: pomerium + labels: + cluster.kfd.sighup.io/module: auth + cluster.kfd.sighup.io/auth-provider-type: sso +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 +--- diff --git a/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..49e948a8d --- /dev/null +++ b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
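+#
+# A minimal sketch (with hypothetical values) of the furyctl.yaml fields that
+# drive this kustomization: the Auth module's network policies are rendered
+# only when network policies are enabled and the Auth provider type is `sso`:
+#
+#   spec:
+#     distribution:
+#       common:
+#         networkPoliciesEnabled: true
+#       modules:
+#         auth:
+#           provider:
+#             type: sso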
+ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +{{- if eq .spec.distribution.modules.auth.provider.type "sso" }} +resources: + - common.yaml + - acme-http-solver.yaml + - pomerium.yaml + - prometheus-metrics.yaml +{{- end }} diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl new file mode 100644 index 000000000..d610a905d --- /dev/null +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -0,0 +1,52 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-ingress-nginx + namespace: pomerium + labels: + cluster.kfd.sighup.io/module: auth + cluster.kfd.sighup.io/auth-provider-type: sso +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: pomerium + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-all + namespace: pomerium + labels: + cluster.kfd.sighup.io/module: auth + cluster.kfd.sighup.io/auth-provider-type: sso +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - {} +--- diff --git a/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..355ca48dd --- /dev/null +++ b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl @@ -0,0 +1,31 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
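+#
+# This policy only opens Pomerium's metrics endpoint to Prometheus running in
+# the monitoring namespace. A quick way to check the rendered object after
+# deployment (a sketch, not part of the template itself):
+#
+#   kubectl -n pomerium get networkpolicy pomerium-ingress-prometheus-metrics -o yaml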
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-ingress-prometheus-metrics + namespace: pomerium + labels: + cluster.kfd.sighup.io/module: auth + cluster.kfd.sighup.io/auth-provider-type: sso +spec: + podSelector: + matchLabels: + app: pomerium + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - protocol: TCP + port: 9090 +--- diff --git a/templates/distribution/manifests/auth/resources/pomerium-policy.yml.tpl b/templates/distribution/manifests/auth/resources/pomerium-policy.yml.tpl index 83477f90b..cf18e05b1 100644 --- a/templates/distribution/manifests/auth/resources/pomerium-policy.yml.tpl +++ b/templates/distribution/manifests/auth/resources/pomerium-policy.yml.tpl @@ -99,6 +99,8 @@ routes: and: - authenticated_user: true {{- end }} + {{- end }} + {{- if and (eq .spec.distribution.modules.tracing.type "tempo") (eq .spec.distribution.modules.tracing.tempo.backend "minio") (.checks.storageClassAvailable) }} - from: https://{{ template "minioTracingUrl" .spec }} to: http://minio-tracing-console.tracing.svc.cluster.local:9001 allow_websockets: true diff --git a/templates/distribution/manifests/auth/secrets/basic-auth.yml.tpl b/templates/distribution/manifests/auth/secrets/basic-auth.yml.tpl index 78f822e26..2e3720e22 100644 --- a/templates/distribution/manifests/auth/secrets/basic-auth.yml.tpl +++ b/templates/distribution/manifests/auth/secrets/basic-auth.yml.tpl @@ -52,4 +52,15 @@ type: Opaque stringData: auth: {{ htpasswd $username $password }} {{- end }} -{{- end -}} \ No newline at end of file +{{ if eq .spec.distribution.modules.networking.type "cilium" }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: basic-auth + namespace: kube-system +type: Opaque +stringData: + auth: {{ htpasswd $username $password }} +{{- end }} +{{- end -}} diff --git a/templates/distribution/manifests/aws/kustomization.yaml.tpl b/templates/distribution/manifests/aws/kustomization.yaml.tpl index 5fc017d20..dcfc507b0 100644 --- a/templates/distribution/manifests/aws/kustomization.yaml.tpl +++ b/templates/distribution/manifests/aws/kustomization.yaml.tpl @@ -13,7 +13,8 @@ resources: - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/snapshot-controller" }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/load-balancer-controller" }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/node-termination-handler" }} - - resources/sc.yml + - resources/storageclasses.yml + - resources/snapshotclasses.yml patchesStrategicMerge: - patches/cluster-autoscaler.yml diff --git a/templates/distribution/manifests/aws/resources/snapshotclasses.yml b/templates/distribution/manifests/aws/resources/snapshotclasses.yml new file mode 100644 index 000000000..e75210305 --- /dev/null +++ b/templates/distribution/manifests/aws/resources/snapshotclasses.yml @@ -0,0 +1,8 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: ebs-sc + labels: + velero.io/csi-volumesnapshot-class: "true" +driver: ebs.csi.aws.com +deletionPolicy: Retain \ No newline at end of file diff --git a/templates/distribution/manifests/aws/resources/sc.yml b/templates/distribution/manifests/aws/resources/storageclasses.yml similarity index 100% rename from templates/distribution/manifests/aws/resources/sc.yml rename to 
templates/distribution/manifests/aws/resources/storageclasses.yml diff --git a/templates/distribution/manifests/dr/kustomization.yaml.tpl b/templates/distribution/manifests/dr/kustomization.yaml.tpl index fa13dbd12..f3fdea029 100644 --- a/templates/distribution/manifests/dr/kustomization.yaml.tpl +++ b/templates/distribution/manifests/dr/kustomization.yaml.tpl @@ -16,8 +16,12 @@ resources: {{- else }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-aws" }} - resources/storageLocation.yaml + - resources/volumeSnapshotLocation.yaml {{- end }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-node-agent" }} +{{- if .spec.distribution.modules.dr.velero.snapshotController.install }} + - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/snapshot-controller" }} +{{- end }} {{- end }} {{- if .spec.distribution.modules.dr.velero.schedules.install }} @@ -33,10 +37,8 @@ patchesStrategicMerge: {{- if eq .spec.distribution.common.provider.type "eks" }} - patches/eks-velero.yml {{- end }} -{{- if and (.spec.distribution.modules.dr.velero.schedules.install) (ne .spec.distribution.modules.dr.velero.schedules.cron.manifests "") }} +{{- if .spec.distribution.modules.dr.velero.schedules.install }} - patches/velero-schedule-manifests.yml -{{- end }} -{{- if and (.spec.distribution.modules.dr.velero.schedules.install) (ne .spec.distribution.modules.dr.velero.schedules.cron.full "") }} - patches/velero-schedule-full.yml {{- end }} diff --git a/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl b/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl index 271394159..834150ac0 100644 --- a/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl +++ b/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl @@ -61,4 +61,21 @@ spec: {{ template "tolerations" $veleroArgs }} {{- end }} + +{{- if .spec.distribution.modules.dr.velero.snapshotController.install }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: snapshot-controller + namespace: kube-system +spec: + template: + spec: + nodeSelector: + {{ template "nodeSelector" $veleroArgs }} + tolerations: + {{ template "tolerations" $veleroArgs }} +{{- end }} + {{- end }} \ No newline at end of file diff --git a/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl b/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl index 898d24550..b572e61ba 100644 --- a/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl +++ b/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl @@ -9,4 +9,8 @@ metadata: name: full namespace: kube-system spec: - schedule: {{ .spec.distribution.modules.dr.velero.schedules.cron.full }} + schedule: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule }}" + template: + ttl: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl }}" + snapshotMoveData: {{ .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData }} + diff --git a/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl b/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl index a67fba88c..3441eb7fc 100644 --- a/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl +++ b/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl @@ -9,4 +9,6 @@ metadata: name: manifests 
namespace: kube-system spec: - schedule: {{ .spec.distribution.modules.dr.velero.schedules.cron.manifests }} + schedule: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule }}" + template: + ttl: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl }}" diff --git a/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl b/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl new file mode 100644 index 000000000..5679203f9 --- /dev/null +++ b/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl @@ -0,0 +1,16 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: velero.io/v1 +kind: VolumeSnapshotLocation +metadata: + name: default + namespace: kube-system + labels: + k8s-app: velero +spec: + config: + region: custom + provider: aws \ No newline at end of file diff --git a/templates/distribution/manifests/ingress/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/kustomization.yaml.tpl index 475584386..8225fdadc 100644 --- a/templates/distribution/manifests/ingress/kustomization.yaml.tpl +++ b/templates/distribution/manifests/ingress/kustomization.yaml.tpl @@ -24,6 +24,10 @@ resources: {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + {{- if ne .spec.distribution.modules.ingress.nginx.type "none" }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/ingress/katalog/forecastle" }} {{- end }} @@ -37,7 +41,7 @@ resources: - resources/ingress-infra.yml {{- end }} -{{ if eq .spec.distribution.modules.ingress.nginx.tls.provider "secret" }} +{{ if and (eq .spec.distribution.modules.ingress.nginx.tls.provider "secret") (ne .spec.distribution.modules.ingress.nginx.type "none") }} - secrets/tls.yml {{- end }} @@ -92,14 +96,14 @@ patchesJson6902: group: apps version: v1 kind: DaemonSet - name: nginx-ingress-controller-external + name: ingress-nginx-controller-external namespace: ingress-nginx path: patchesJson/ingress-nginx.yml - target: group: apps version: v1 kind: DaemonSet - name: nginx-ingress-controller-internal + name: ingress-nginx-controller-internal namespace: ingress-nginx path: patchesJson/ingress-nginx.yml {{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} @@ -107,7 +111,7 @@ patchesJson6902: group: apps version: v1 kind: DaemonSet - name: nginx-ingress-controller + name: ingress-nginx-controller namespace: ingress-nginx path: patchesJson/ingress-nginx.yml {{- end }} diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl index 663d748ea..a18265814 100644 --- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl @@ -22,7 +22,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: nginx-configuration-external + name: ingress-nginx-controller-external namespace: ingress-nginx data: use-proxy-protocol: "true" diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl index a7aa6f6ad..6ae0a6b14 100644 --- 
a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl @@ -21,7 +21,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: nginx-configuration-internal + name: ingress-nginx-controller-internal namespace: ingress-nginx data: use-proxy-protocol: "true" diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl index 08dc64d82..60b7771ba 100644 --- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl @@ -22,7 +22,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: nginx-configuration + name: ingress-nginx-controller namespace: ingress-nginx data: use-proxy-protocol: "true" diff --git a/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl b/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl index f94fe9a6c..2ba355781 100644 --- a/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl @@ -66,7 +66,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: nginx-ingress-controller-external + name: ingress-nginx-controller-external namespace: ingress-nginx spec: template: @@ -79,7 +79,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: nginx-ingress-controller-internal + name: ingress-nginx-controller-internal namespace: ingress-nginx spec: template: @@ -93,7 +93,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: nginx-ingress-controller + name: ingress-nginx-controller namespace: ingress-nginx spec: template: diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl new file mode 100644 index 000000000..bbc937c2b --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl @@ -0,0 +1,88 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
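+#
+# The egress rules in this file match on destination port only (there is no
+# `to:` selector), so they allow traffic to any peer on the listed ports. A
+# stricter variant could also pin the kube-apiserver endpoint, e.g. (a
+# sketch; the CIDR below is hypothetical):
+#
+#   egress:
+#     - to:
+#         - ipBlock:
+#             cidr: 10.96.0.1/32
+#       ports:
+#         - port: 6443
+#           protocol: TCP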
+ +# https://cert-manager.io/docs/installation/best-practice/#network-requirements +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: cert-manager-egress-kube-apiserver + namespace: cert-manager + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app.kubernetes.io/instance: cert-manager + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: cert-manager-webhook-ingress-kube-apiserver + namespace: cert-manager + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + policyTypes: + - Ingress + ingress: + - ports: + - port: 10250 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: cert-manager-egress-https + namespace: cert-manager + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + policyTypes: + - Egress + egress: + - ports: + - port: 443 + protocol: TCP + - port: 80 + protocol: TCP +--- +{{- if eq .spec.distribution.modules.auth.provider.type "sso" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: acme-http-solver-ingress-lets-encrypt + namespace: pomerium + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app: cert-manager + policyTypes: + - Ingress + ingress: + - ports: + - port: 8089 + protocol: TCP +--- +{{- end }} diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl new file mode 100644 index 000000000..963b7db18 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl @@ -0,0 +1,38 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: cert-manager + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-kube-dns + namespace: cert-manager + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + # https://cert-manager.io/docs/installation/best-practice/#network-requirements \ No newline at end of file diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl new file mode 100644 index 000000000..b71d8d27f --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl @@ -0,0 +1,12 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. 
+# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - cert-manager.yaml + - prometheus-metrics.yaml diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..c329f39e5 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: cert-manager-ingress-prometheus-metrics + namespace: cert-manager + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: controller + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9402 + protocol: TCP diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl new file mode 100644 index 000000000..d1a1f295a --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl @@ -0,0 +1,44 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-kube-dns + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 +--- diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl new file mode 100644 index 000000000..3bd02356e --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl @@ -0,0 +1,22 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
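+#
+# external-dns needs to reach the DNS provider's API (e.g. AWS Route 53),
+# whose endpoints are not known in advance, hence the allow-all egress below.
+# To confirm the policy selects the intended pods (a sketch):
+#
+#   kubectl -n ingress-nginx get pods -l app=external-dns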
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: external-dns-egress-all + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app: external-dns + policyTypes: + - Egress + egress: + - {} +--- diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl new file mode 100644 index 000000000..c223b5b3d --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl @@ -0,0 +1,59 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: forecastle-ingress-nginx + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app: forecastle + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + matchLabels: + kubernetes.io/metadata.name: pomerium +{{ else }} + matchLabels: + kubernetes.io/metadata.name: ingress-nginx +{{- end }} + podSelector: + matchLabels: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + app: pomerium +{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ports: + - port: 3000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: forecastle-egress-kube-apiserver + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app: forecastle + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP \ No newline at end of file diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl new file mode 100644 index 000000000..46494b30e --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl @@ -0,0 +1,14 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - forecastle.yaml + - nginx-ingress-controller.yaml + - prometheus-metrics.yaml + - external-dns.yaml diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl new file mode 100644 index 000000000..164cb229c --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl @@ -0,0 +1,51 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
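+#
+# The controller receives traffic for the whole cluster, so its ingress
+# cannot be narrowed down by source; only the listening ports (8080, 8443,
+# 9443) are pinned. To inspect the ports actually exposed by the DaemonSet
+# (a sketch, assuming the single-controller name):
+#
+#   kubectl -n ingress-nginx get daemonset ingress-nginx-controller \
+#     -o jsonpath='{.spec.template.spec.containers[0].ports}'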
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: nginx-egress-all + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + policyTypes: + - Egress + egress: + - {} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-ingress-nginx + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ingress: + - ports: + - port: 8080 + protocol: TCP + - port: 8443 + protocol: TCP + - port: 9443 + protocol: TCP + policyTypes: + - Ingress diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..f070b9d54 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl @@ -0,0 +1,33 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: nginx-ingress-prometheus-metrics + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - protocol: TCP + port: 10254 diff --git a/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..22b97ea52 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
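+#
+# Which policy sets get included depends on the ingress configuration in
+# furyctl.yaml (a sketch with hypothetical values):
+#
+#   spec:
+#     distribution:
+#       modules:
+#         ingress:
+#           nginx:
+#             type: dual            # single, dual or none
+#             tls:
+#               provider: certManager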
+ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +{{- if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" }} + - cert-manager +{{ end }} +{{- if ne .spec.distribution.modules.ingress.nginx.type "none" }} + - ingress-nginx +{{ end }} \ No newline at end of file diff --git a/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl b/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl index 3f5b891bd..357bd2e2b 100644 --- a/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl +++ b/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl @@ -31,11 +31,15 @@ spec: ingress: class: {{ template "globalIngressClass" (dict "type" "external" "spec" .spec) }} podTemplate: + metadata: + labels: + app: cert-manager spec: nodeSelector: - {{ template "nodeSelector" $certManagerArgs }} + {{- /* NOTE!: merge order is important below */}} + {{ template "nodeSelector" ( merge (dict "returnEmptyInsteadOfNull" true) $certManagerArgs ) }} tolerations: - {{ template "tolerations" ( merge (dict "indent" 16) $certManagerArgs ) }} + {{ template "tolerations" ( merge (dict "indent" 16 "returnEmptyInsteadOfNull" true) $certManagerArgs ) }} {{- end -}} {{- else if .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers }} solvers: diff --git a/templates/distribution/manifests/logging/kustomization.yaml.tpl b/templates/distribution/manifests/logging/kustomization.yaml.tpl index c8f361bfd..9b1b42738 100644 --- a/templates/distribution/manifests/logging/kustomization.yaml.tpl +++ b/templates/distribution/manifests/logging/kustomization.yaml.tpl @@ -47,6 +47,9 @@ resources: - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/logging/katalog/loki-distributed" }} {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} # The kustomize version we are using does not support specifying more than 1 strategicMerge patch # in a single YAML file under the `patches` directive like the old versions did for `patchesStrategicMerge`.
diff --git a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl index f1851f754..7d9c30240 100644 --- a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl +++ b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl @@ -77,6 +77,15 @@ schema_config: object_store: s3 schema: v11 store: boltdb-shipper +{{- if and (index .spec.distribution.modules.logging "loki") (index .spec.distribution.modules.logging.loki "tsdbStartDate") }} + - from: "{{ .spec.distribution.modules.logging.loki.tsdbStartDate }}" + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v13 + store: tsdb +{{- end }} server: http_listen_port: 3100 storage_config: @@ -95,6 +104,12 @@ storage_config: cache_ttl: 24h resync_interval: 5s shared_store: s3 + tsdb_shipper: + active_index_directory: /var/loki/index + cache_location: /var/loki/cache + cache_ttl: 24h + resync_interval: 5s + shared_store: s3 filesystem: directory: /var/loki/chunks table_manager: diff --git a/templates/distribution/manifests/logging/patches/minio.root.env.tpl b/templates/distribution/manifests/logging/patches/minio.root.env.tpl index e1ed7291c..0458f94b2 100644 --- a/templates/distribution/manifests/logging/patches/minio.root.env.tpl +++ b/templates/distribution/manifests/logging/patches/minio.root.env.tpl @@ -1,2 +1,2 @@ -ROOT_PASSWORD={{ .spec.distribution.modules.logging.minio.rootUser.password }} -ROOT_USER={{ .spec.distribution.modules.logging.minio.rootUser.username }} +rootPassword={{ .spec.distribution.modules.logging.minio.rootUser.password }} +rootUser={{ .spec.distribution.modules.logging.minio.rootUser.username }} diff --git a/templates/distribution/manifests/logging/policies/common.yaml.tpl b/templates/distribution/manifests/logging/policies/common.yaml.tpl new file mode 100644 index 000000000..6fd8ddaad --- /dev/null +++ b/templates/distribution/manifests/logging/policies/common.yaml.tpl @@ -0,0 +1,41 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-kube-dns + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 diff --git a/templates/distribution/manifests/logging/policies/configs.yaml.tpl b/templates/distribution/manifests/logging/policies/configs.yaml.tpl new file mode 100644 index 000000000..05ff5e2d5 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/configs.yaml.tpl @@ -0,0 +1,23 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
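+#
+# The event-tailer only talks to the Kubernetes API (it tails cluster Events
+# into the logging pipeline), so its egress is limited to the API server port
+# below. To list the pods matched by this policy (a sketch):
+#
+#   kubectl -n logging get pods -l app.kubernetes.io/name=event-tailer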
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: event-tailer-egress-kube-apiserver + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: event-tailer + egress: + - ports: + - port: 6443 + protocol: TCP +--- diff --git a/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl new file mode 100644 index 000000000..48f6095a0 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl @@ -0,0 +1,66 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentbit-egress-fluentd + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + egress: + - ports: + # fluentd + - port: 24240 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentbit-egress-kube-apiserver + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentbit-ingress-prometheus-metrics + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 2020 + protocol: TCP diff --git a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl new file mode 100644 index 000000000..95adfac59 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl @@ -0,0 +1,73 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
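+#
+# Log flow under these policies: Fluent Bit agents forward to Fluentd on TCP
+# 24240, Prometheus scrapes Fluentd's metrics on TCP 24231, and Fluentd ships
+# logs to the configured store, which is why its egress stays open when the
+# MinIO backend is in use. To review the logging policies at once (a sketch):
+#
+#   kubectl -n logging get networkpolicy -l cluster.kfd.sighup.io/module=logging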
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentd-egress-all + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + egress: + - {} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentd-ingress-fluentbit + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + ports: + - port: 24240 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentd-ingress-prometheus-metrics + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 24231 + protocol: TCP +--- + diff --git a/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl b/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..17f67c2ef --- /dev/null +++ b/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl @@ -0,0 +1,26 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +{{ $loggingType := .spec.distribution.modules.logging.type }} + +resources: + - common.yaml + - configs.yaml + - fluentbit.yaml + - fluentd.yaml + - logging-operator.yaml + - minio.yaml + +{{- if eq $loggingType "loki" }} + - loki.yaml +{{- end }} + +{{- if eq $loggingType "opensearch" }} + - opensearch-dashboards.yaml + - opensearch.yaml +{{- end }} diff --git a/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl new file mode 100644 index 000000000..bc0a2cccd --- /dev/null +++ b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl @@ -0,0 +1,22 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: logging-operator-egress-kube-apiserver + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: logging-operator + egress: + - ports: + - port: 6443 + protocol: TCP diff --git a/templates/distribution/manifests/logging/policies/loki.yaml.tpl b/templates/distribution/manifests/logging/policies/loki.yaml.tpl new file mode 100644 index 000000000..7bae584c3 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/loki.yaml.tpl @@ -0,0 +1,150 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
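+#
+# The loki-distributed components discover each other over the memberlist
+# gossip protocol (TCP 7946) and exchange data over gRPC (9095) and HTTP
+# (3100), so the discovery policy below allows those ports in both directions
+# within the logging namespace. To inspect it after deployment (a sketch):
+#
+#   kubectl -n logging get networkpolicy loki-distributed-discovery -o yaml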
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-ingress-fluentd + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + app.kubernetes.io/component: gateway + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ports: + - port: 8080 + protocol: TCP + - port: 3100 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-ingress-grafana + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + app.kubernetes.io/component: gateway + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: grafana + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-ingress-prometheus-metrics + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + ingress: + - ports: + - port: 3100 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-discovery + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + ingress: + - ports: + - port: 9095 + protocol: TCP + - port: 3100 + protocol: TCP + - port: 7946 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + egress: + - ports: + - port: 9095 + protocol: TCP + - port: 3100 + protocol: TCP + - port: 7946 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-egress-all + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + egress: + - {} +--- diff --git a/templates/distribution/manifests/logging/policies/minio.yaml.tpl b/templates/distribution/manifests/logging/policies/minio.yaml.tpl new file mode 100644 index 000000000..09c6ffa34 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/minio.yaml.tpl @@ -0,0 +1,178 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
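+#
+# MinIO exposes two ports: 9000 for the S3 API (used by the logging pipeline
+# and by the buckets-setup job) and 9001 for the web console, which is only
+# reachable through the ingress controller or Pomerium, depending on the auth
+# setup. To review the rendered policies (a sketch):
+#
+#   kubectl -n logging get networkpolicy -l cluster.kfd.sighup.io/logging-backend=minio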
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-namespace + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app: minio + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + ports: + - port: 9000 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-kube-apiserver + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-logging-buckets-setup + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-minio + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-logging-buckets-setup + egress: + - ports: + - port: 9000 + protocol: TCP + to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-prometheus-metrics + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: + - ports: + - port: 9000 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-egress-https + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio + egress: + - ports: + - port: 443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-nginx + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: +# single nginx, no sso +{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 9001 + protocol: TCP +--- diff 
--git a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl new file mode 100644 index 000000000..6a8fb98cc --- /dev/null +++ b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl @@ -0,0 +1,118 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-dashboards-egress-opensearch + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: opensearch-dashboards + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9200 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-dashboards-ingress-jobs + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: opensearch-dashboards + release: opensearch-dashboards + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: opensearch-dashboards + app.kubernetes.io/instance: opensearch-dashboards + ports: + - port: 5601 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-dashboards-ingress-nginx + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: opensearch-dashboards + ingress: + - from: + - namespaceSelector: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + matchLabels: + kubernetes.io/metadata.name: pomerium +{{ else }} + matchLabels: + kubernetes.io/metadata.name: ingress-nginx +{{- end }} + podSelector: + matchLabels: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + app: pomerium +{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ports: + - port: 5601 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: jobs-egress-opensearch-dashboards + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch-dashboards + app.kubernetes.io/instance: opensearch-dashboards + egress: + - to: + - podSelector: + matchLabels: + app: opensearch-dashboards + release: opensearch-dashboards + ports: + - port: 5601 + protocol: TCP +--- diff --git a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl new file mode 100644 index 000000000..fccfeae54 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl @@ -0,0 +1,169 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
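+
+# NOTE: ports follow the OpenSearch defaults: 9200 (REST API) and 9300
+# (node-to-node transport, covered by the discovery policy); 9108 is assumed
+# to be the Prometheus exporter's metrics port.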
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-dashboards + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app: opensearch-dashboards + ports: + - port: 9200 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-fluentd + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ports: + - port: 9200 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-discovery + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9300 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9300 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-prometheus-metrics + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9108 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-jobs + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + app.kubernetes.io/instance: opensearch + ports: + - port: 9200 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: jobs-egress-opensearch + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + app.kubernetes.io/instance: opensearch + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9200 + protocol: TCP +--- \ No newline at end of file diff --git a/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl index 36057ba6c..83a453240 
100644 --- a/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl +++ b/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl @@ -65,7 +65,7 @@ metadata: forecastle.stakater.com/icon: "https://min.io/resources/img/logo/MINIO_Bird.png" {{ if not .spec.distribution.modules.logging.overrides.ingresses.minio.disableAuth }}{{ template "ingressAuth" . }}{{ end }} {{ template "certManagerClusterIssuer" . }} - + {{ if and (not .spec.distribution.modules.logging.overrides.ingresses.minio.disableAuth) (eq .spec.distribution.modules.auth.provider.type "sso") }} name: minio-logging namespace: pomerium diff --git a/templates/distribution/manifests/monitoring/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/kustomization.yaml.tpl index e1bf8735e..955daf763 100644 --- a/templates/distribution/manifests/monitoring/kustomization.yaml.tpl +++ b/templates/distribution/manifests/monitoring/kustomization.yaml.tpl @@ -54,8 +54,21 @@ resources: - secrets/alertmanager.yml {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml +{{- if eq .spec.distribution.common.provider.type "eks" }}{{/* in EKS there are no files to monitor on nodes */}} + - |- + $patch: delete + apiVersion: apps/v1 + kind: DaemonSet + metadata: + namespace: monitoring + name: x509-certificate-exporter-data-plane +{{- end }} {{- if or (eq $monitoringType "prometheus") (eq $monitoringType "mimir") }} - patches/alertmanager-operated.yml {{- if .checks.storageClassAvailable }} diff --git a/templates/distribution/manifests/monitoring/patches/grafana.ini.tpl b/templates/distribution/manifests/monitoring/patches/grafana.ini.tpl index 0e1c5844a..4be17d419 100644 --- a/templates/distribution/manifests/monitoring/patches/grafana.ini.tpl +++ b/templates/distribution/manifests/monitoring/patches/grafana.ini.tpl @@ -6,7 +6,7 @@ signout_redirect_url = https://{{ template "grafanaUrl" .spec }}/.pomerium/sign_ enabled = true header_name = X-Pomerium-Jwt-Assertion email_claim = email -jwk_set_url = https://{{ template "grafanaUrl" .spec }}/.well-known/pomerium/jwks.json +jwk_set_url = https://{{ template "pomeriumUrl" .spec }}/.well-known/pomerium/jwks.json cache_ttl = 60m username_claim = sub auto_sign_up = true diff --git a/templates/distribution/manifests/monitoring/patches/infra-nodes.yml.tpl b/templates/distribution/manifests/monitoring/patches/infra-nodes.yml.tpl index de5020c3e..62f3e2396 100644 --- a/templates/distribution/manifests/monitoring/patches/infra-nodes.yml.tpl +++ b/templates/distribution/manifests/monitoring/patches/infra-nodes.yml.tpl @@ -114,6 +114,7 @@ spec: {{ template "nodeSelector" $x509ExporterArgs }} tolerations: {{ template "tolerations" $x509ExporterArgs }} +{{- if ne .spec.distribution.common.provider.type "eks" }}{{/* in EKS there are no files to monitor on nodes */}} --- apiVersion: apps/v1 kind: DaemonSet @@ -127,6 +128,7 @@ spec: {{ template "nodeSelector" $x509ExporterArgs }} tolerations: {{ template "tolerations" $x509ExporterArgs }} +{{- end }} {{ if eq $monitoringType "mimir" -}} {{- $mimirArgs := dict "module" "monitoring" "package" "mimir" "spec" .spec -}} diff --git a/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl b/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl index 15de617fa..0dec09ed2 100644 --- a/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl +++ 
b/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl @@ -1,2 +1,2 @@ -ROOT_PASSWORD={{ .spec.distribution.modules.monitoring.minio.rootUser.password }} -ROOT_USER={{ .spec.distribution.modules.monitoring.minio.rootUser.username }} +rootPassword={{ .spec.distribution.modules.monitoring.minio.rootUser.password }} +rootUser={{ .spec.distribution.modules.monitoring.minio.rootUser.username }} diff --git a/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl new file mode 100644 index 000000000..2ed8a7215 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl @@ -0,0 +1,44 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/alertmanager-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: alertmanager-main + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9093 + protocol: TCP + - port: 8080 + protocol: TCP + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: alertmanager + ports: + - port: 9094 + protocol: TCP + - port: 9094 + protocol: UDP + podSelector: + matchLabels: + app.kubernetes.io/component: alert-router + app.kubernetes.io/instance: main + app.kubernetes.io/name: alertmanager + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl new file mode 100644 index 000000000..c8b4745c7 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl @@ -0,0 +1,35 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/blackboxExporter-networkPolicy.yaml +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: blackbox-exporter + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9115 + protocol: TCP + - port: 19115 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: blackbox-exporter + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/common.yaml.tpl b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl new file mode 100644 index 000000000..9ca8ec757 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl @@ -0,0 +1,44 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
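+
+# NOTE: this file defines the namespace baseline: deny all traffic for every
+# pod, then allow egress towards kube-dns (53/UDP and 53/TCP) so that name
+# resolution keeps working; everything else must be explicitly allowed by
+# the per-component policies.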
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-kube-dns + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl new file mode 100644 index 000000000..95b548e7b --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl @@ -0,0 +1,82 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +{{- $monitoringType := .spec.distribution.modules.monitoring.type }} + +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/grafana-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: grafana + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 3000 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: grafana + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: grafana-ingress-nginx + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: grafana + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Ingress + ingress: +# single nginx, no sso +{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 3000 + protocol: TCP +--- diff --git a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl new file mode 100644 index 000000000..759609694 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl @@ -0,0 +1,103 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. 
+# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-ingress-nginx + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/instance: k8s + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Ingress + ingress: +# single nginx, no sso +{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 9090 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: alertmanager-ingress-nginx + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: alert-router + app.kubernetes.io/instance: main + app.kubernetes.io/name: alertmanager + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Ingress + ingress: +# single nginx, no sso +{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 9093 + protocol: TCP +--- diff --git a/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl new file mode 100644 index 000000000..0851cf907 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl @@ -0,0 +1,34 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
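+
+# NOTE: 8443 and 9443 are the kube-rbac-proxy ports fronting
+# kube-state-metrics' main and self telemetry endpoints, as in upstream
+# kube-prometheus.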
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/kubeStateMetrics-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kube-state-metrics + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 8443 + protocol: TCP + - port: 9443 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..0fa4c4391 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl @@ -0,0 +1,33 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +{{- $monitoringType := .spec.distribution.modules.monitoring.type }} +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - prometheus-operator.yaml + - kube-state-metrics.yaml + - node-exporter.yaml + - x509-exporter.yaml + - blackbox-exporter.yaml + +{{- if or (eq $monitoringType "prometheus") (eq $monitoringType "mimir") }} + - alertmanager.yaml + - prometheus-adapter.yaml + - grafana.yaml + - prometheus.yaml +{{- end }} +{{- if eq $monitoringType "mimir" }} + - mimir.yaml +{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }} + - minio.yaml +{{- end }} +{{- end }} + +{{- if and (ne .spec.distribution.modules.ingress.nginx.type "none") }}{{/* we don't need ingresses for Prometheus in Agent mode */}} + - ingress.yaml +{{- end }} diff --git a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl new file mode 100644 index 000000000..77dd0149d --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl @@ -0,0 +1,191 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
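+
+# NOTE: the discovery policy mirrors the Loki one: 8080 (HTTP), 9095 (gRPC)
+# and 7946 (memberlist gossip) are assumed to be the mimir-distributed chart
+# defaults for inter-component traffic.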
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-ingress-prometheus-metrics + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + ingress: + - ports: + - port: 8080 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-discovery + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + ingress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + - port: 8080 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + egress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + - port: 8080 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: mimir +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-gateway-ingress-grafana + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/component: gateway + app.kubernetes.io/instance: mimir-distributed + app.kubernetes.io/name: mimir + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: grafana + app.kubernetes.io/component: grafana + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-querier-egress-https + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/instance: mimir-distributed + app.kubernetes.io/name: mimir + app.kubernetes.io/component: querier + egress: + - ports: + - port: 443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-ingester-egress-https + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/instance: mimir-distributed + app.kubernetes.io/name: mimir + app.kubernetes.io/component: ingester + egress: + - ports: + - port: 443 + protocol: TCP +{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-egress-minio + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + egress: + - to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 9000 + protocol: TCP +{{- else }} +--- +apiVersion: 
networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-egress-all + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + egress: + - {} +{{- end }} diff --git a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl new file mode 100644 index 000000000..2af4eae0e --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl @@ -0,0 +1,178 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-namespace + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app: minio + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 9000 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-kube-apiserver + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-monitoring-buckets-setup + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-minio + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-monitoring-buckets-setup + egress: + - ports: + - port: 9000 + protocol: TCP + to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-prometheus-metrics + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: + - ports: + - port: 9000 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-monitoring-egress-https + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio + egress: + - ports: + - port: 443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-nginx + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: 
+ app: minio + ingress: +# single nginx, no sso +{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 9001 + protocol: TCP +--- diff --git a/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl new file mode 100644 index 000000000..4b06c7ece --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl @@ -0,0 +1,32 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/nodeExporter-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: node-exporter + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9100 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: node-exporter + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl new file mode 100644 index 000000000..7f26d2dd5 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl @@ -0,0 +1,50 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
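+
+# NOTE: ingress is intentionally open: the adapter serves the resource and
+# custom metrics APIs to the kube-apiserver, whose source address cannot be
+# matched by pod or namespace selectors (same rationale as upstream
+# kube-prometheus).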
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheusAdapter-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-adapter + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + egress: + - {} + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/component: metrics-adapter + app.kubernetes.io/name: prometheus-adapter + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-ingress-prometheus-adapter + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/component: metrics-adapter + app.kubernetes.io/name: prometheus-adapter + app.kubernetes.io/part-of: kube-prometheus + ports: + - port: 9090 + protocol: TCP +--- diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl new file mode 100644 index 000000000..d33974f30 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl @@ -0,0 +1,32 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheusOperator-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-operator + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 8443 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: prometheus-operator + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl new file mode 100644 index 000000000..9d5fee209 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl @@ -0,0 +1,166 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
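+
+# NOTE: the upstream policy below already allows all egress for the
+# prometheus-k8s pods, so the narrower egress policies that follow do not
+# restrict anything further (NetworkPolicies are additive); they document
+# the expected flows towards Mimir, MinIO and the kube-apiserver.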
+ +{{- $monitoringType := .spec.distribution.modules.monitoring.type }} + +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheus-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-k8s + namespace: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9090 + protocol: TCP + - port: 8080 + protocol: TCP + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus-adapter + ports: + - port: 9090 + protocol: TCP + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: grafana + ports: + - port: 9090 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/instance: k8s + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-minio + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + egress: + - to: + - podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +{{- if eq $monitoringType "mimir" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-mimir + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: k8s + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/component: gateway + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: mimir-distributed + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-kube-apiserver + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + egress: + - ports: + - port: 6443 + protocol: TCP + - port: 8405 + protocol: TCP +--- +{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-miniologging + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-minio-monitoring + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: k8s + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +{{- end }} +{{- end }} + diff --git a/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl new file mode 100644 index 000000000..a89c3f207 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl @@ -0,0 +1,45 @@ +# Copyright (c) 2017-present SIGHUP 
s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: x509-exporter-egress-kube-apiserver + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: x509-certificate-exporter + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: x509-exporter-ingress-prometheus-metrics + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: x509-certificate-exporter + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9793 + protocol: TCP + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl index 3d4f1fbe2..ef87719f5 100644 --- a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl +++ b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl @@ -177,7 +177,7 @@ spec: number: 80 {{ else }} service: - name: minio-tracing-console + name: minio-monitoring-console port: name: http {{ end }} diff --git a/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl b/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl index 062a63c94..a616ece68 100644 --- a/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl +++ b/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl @@ -30,9 +30,7 @@ spec: probeSelector: {} serviceMonitorNamespaceSelector: {} serviceMonitorSelector: {} - scrapeConfigSelector: - matchLabels: - prometheus: k8s + scrapeConfigSelector: {} {{- $prometheusAgentArgs := dict "module" "monitoring" "package" "prometheusAgent" "spec" .spec }} tolerations: diff --git a/templates/distribution/manifests/networking/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/networking/resources/ingress-infra.yml.tpl index 66744847c..b21b17f63 100644 --- a/templates/distribution/manifests/networking/resources/ingress-infra.yml.tpl +++ b/templates/distribution/manifests/networking/resources/ingress-infra.yml.tpl @@ -10,10 +10,13 @@ metadata: cluster.kfd.sighup.io/useful-link.enable: "true" annotations: cluster.kfd.sighup.io/useful-link.url: https://{{ template "hubbleUrl" .spec }} - cluster.kfd.sighup.io/useful-link.name: "Cilium Hubble" + cluster.kfd.sighup.io/useful-link.name: "Cilium Hubble UI" forecastle.stakater.com/expose: "true" - forecastle.stakater.com/appName: "Cilium Hubble" + forecastle.stakater.com/appName: "Cilium Hubble UI" forecastle.stakater.com/icon: "https://cilium.io/static/hubble-light-1-812e65cbb72a7f4efed59fcea48df840.svg" + {{ if and (not .spec.distribution.modules.ingress.overrides.ingresses.forecastle.disableAuth) (eq .spec.distribution.modules.auth.provider.type "sso") }} + forecastle.stakater.com/group: "networking" + {{ end }} {{ if not .spec.distribution.modules.networking.overrides.ingresses.hubble.disableAuth }}{{ template "ingressAuth" . }}{{ end }} {{ template "certManagerClusterIssuer" . 
}} name: hubble diff --git a/templates/distribution/manifests/opa/kustomization.yaml.tpl b/templates/distribution/manifests/opa/kustomization.yaml.tpl index 6b6672a33..b31532d5b 100644 --- a/templates/distribution/manifests/opa/kustomization.yaml.tpl +++ b/templates/distribution/manifests/opa/kustomization.yaml.tpl @@ -27,6 +27,10 @@ resources: {{- end }} {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml {{- if .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces }} diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl new file mode 100644 index 000000000..10f8a1e52 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl @@ -0,0 +1,22 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: audit-controller-egress-kube-apiserver + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: + matchLabels: + control-plane: audit-controller + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl new file mode 100644 index 000000000..ad51c243a --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl @@ -0,0 +1,43 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-dns + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 \ No newline at end of file diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl new file mode 100644 index 000000000..75fed7196 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl @@ -0,0 +1,43 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
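+
+# NOTE: 6443 is the kube-apiserver port; 8443 is Gatekeeper's default
+# webhook port, usually exposed as 443 by its Service, which is why both
+# are allowed on ingress.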
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: controller-manager-egress-kube-apiserver + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: controller-manager-ingress-kube-apiserver + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 8443 + - protocol: TCP + port: 443 diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl new file mode 100644 index 000000000..84557ba3e --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl @@ -0,0 +1,48 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: gpm-egress-kube-apiserver + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: + matchLabels: + app: gatekeeper-policy-manager + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: gpm-ingress-pomerium + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: + matchLabels: + app: gatekeeper-policy-manager + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium + ports: + - protocol: TCP + port: 8080 diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl new file mode 100644 index 000000000..79f5cfce0 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - audit.yaml + - controller-manager.yaml + - gatekeeper-policy-manager.yaml + - prometheus-metrics.yaml + diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..44cd7a68b --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl @@ -0,0 +1,29 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
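+
+# NOTE: 8888 is Gatekeeper's default metrics port; the gatekeeper.sh/system
+# label matches every Gatekeeper component, so a single policy covers the
+# whole deployment.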
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: gatekeeper-ingress-prometheus-metrics + namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper +spec: + podSelector: + matchLabels: + gatekeeper.sh/system: "yes" + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - protocol: TCP + port: 8888 diff --git a/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..aed10dc32 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl @@ -0,0 +1,16 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +{{- if eq .spec.distribution.modules.policy.type "gatekeeper" }} + - gatekeeper +{{- end }} +{{- if eq .spec.distribution.modules.policy.type "kyverno" }} + - kyverno +{{- end }} + diff --git a/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl new file mode 100644 index 000000000..ccb1424a9 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl @@ -0,0 +1,42 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-dns + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 diff --git a/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl new file mode 100644 index 000000000..77a88b0bb --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl @@ -0,0 +1,11 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - kyverno.yaml diff --git a/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl new file mode 100644 index 000000000..ff8c06b24 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl @@ -0,0 +1,117 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. 
+# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyverno-admission-egress-kube-apiserver + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: admission-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyverno-admission-ingress-nodes + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: admission-controller + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyverno-background-egress-kube-apiserver + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: background-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyverno-reports-egress-kube-apiserver + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: reports-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyverno-cleanup-egress-kube-apiserver + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: cleanup-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyverno-cleanup-reports-egress-kube-apiserver + namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno +spec: + podSelector: + matchExpressions: + - { key: "batch.kubernetes.io/job-name", operator: "Exists" } + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 diff --git a/templates/distribution/manifests/tracing/kustomization.yaml.tpl b/templates/distribution/manifests/tracing/kustomization.yaml.tpl index bf54f6130..aab87047e 100644 --- a/templates/distribution/manifests/tracing/kustomization.yaml.tpl +++ b/templates/distribution/manifests/tracing/kustomization.yaml.tpl @@ -17,6 +17,10 @@ resources: {{- end }} {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml {{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} diff --git a/templates/distribution/manifests/tracing/patches/minio.root.env.tpl b/templates/distribution/manifests/tracing/patches/minio.root.env.tpl index a63a82680..333ad7378 100644 --- a/templates/distribution/manifests/tracing/patches/minio.root.env.tpl +++ b/templates/distribution/manifests/tracing/patches/minio.root.env.tpl @@ -1,2 +1,2 @@ -ROOT_PASSWORD={{ .spec.distribution.modules.tracing.minio.rootUser.password }} -ROOT_USER={{ 
.spec.distribution.modules.tracing.minio.rootUser.username }} +rootPassword={{ .spec.distribution.modules.tracing.minio.rootUser.password }} +rootUser={{ .spec.distribution.modules.tracing.minio.rootUser.username }} diff --git a/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl b/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl index b1cd52196..99ac37a25 100644 --- a/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl @@ -86,13 +86,12 @@ querier: trace_by_id: query_timeout: 10s query_frontend: + max_outstanding_per_tenant: 2000 max_retries: 2 search: concurrent_jobs: 1000 target_bytes_per_job: 104857600 trace_by_id: - hedge_requests_at: 2s - hedge_requests_up_to: 2 query_shards: 50 server: grpc_server_max_recv_msg_size: 4194304 diff --git a/templates/distribution/manifests/tracing/policies/common.yaml.tpl b/templates/distribution/manifests/tracing/policies/common.yaml.tpl new file mode 100644 index 000000000..6727129eb --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/common.yaml.tpl @@ -0,0 +1,42 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-kube-dns + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + \ No newline at end of file diff --git a/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl b/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..96e0dff5c --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl @@ -0,0 +1,14 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - tempo.yaml +{{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} + - minio.yaml +{{- end }} diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl new file mode 100644 index 000000000..9e4244d78 --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl @@ -0,0 +1,177 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-namespace + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app: minio + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + ports: + - port: 9000 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-kube-apiserver + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-tracing-buckets-setup + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-minio + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-tracing-buckets-setup + egress: + - ports: + - port: 9000 + protocol: TCP + to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-prometheus-metrics + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: + - ports: + - port: 9000 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-pomerium + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: +# single nginx, no sso +{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 9001 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-egress-https + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio + egress: + - ports: + - port: 443 + protocol: TCP +--- \ No 
newline at end of file diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl new file mode 100644 index 000000000..09528ec3d --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -0,0 +1,255 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-discovery + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + ingress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + - port: 3100 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + egress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + - port: 3100 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app.kubernetes.io/name: tempo +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-gateway-ingress-grafana + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/component: gateway + app.kubernetes.io/name: tempo + app.kubernetes.io/instance: tempo-distributed + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/component: grafana + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: kube-prometheus + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-tempo-distributor + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Egress + podSelector: {} + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: distributor + ports: + - port: 4317 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributor-ingress-traces + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: distributor + ingress: + - ports: + - port: 4317 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-components-egress-memcached + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/instance: tempo-distributed + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: memcached + ports: + - port: 11211 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: memcached-ingress-querier + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: memcached + ingress: + - from: + - podSelector: + matchLabels: + 
app.kubernetes.io/name: tempo + app.kubernetes.io/component: querier + ports: + - port: 11211 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-ingress-prometheus-metrics + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + ingress: + - ports: + - port: 3100 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-components-egress-https + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/instance: tempo-distributed + egress: + - ports: + - port: 443 + protocol: TCP +{{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-egress-minio + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + egress: + - to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + ports: + - port: 9000 + protocol: TCP +{{- else }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-egress-all + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + egress: + - {} +{{- end }} +--- diff --git a/templates/distribution/scripts/apply.sh.tpl b/templates/distribution/scripts/apply.sh.tpl index e04b62446..a8d0c883f 100644 --- a/templates/distribution/scripts/apply.sh.tpl +++ b/templates/distribution/scripts/apply.sh.tpl @@ -43,7 +43,7 @@ $kubectlbin create namespace calico-system --dry-run=client -o yaml | $kubectlbi < out.yaml $yqbin 'select(.kind == "CustomResourceDefinition")' | $kubectlbin apply -f - --server-side < out.yaml $yqbin 'select(.kind == "CustomResourceDefinition")' | $kubectlbin wait --for condition=established --timeout=60s -f - -echo "Clean up init jobs, since they cannot be changed without conficts and they are idempotent by nature..." +echo "Clean up old init jobs..." 
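Kubernetes treats a Job's pod template as immutable, so a changed init Job cannot be re-applied in place; since these jobs are idempotent, deleting them and letting the next apply recreate them is safe. A minimal sketch of the equivalent manual step, using the first Job name the script targets below:

    kubectl delete job minio-setup -n kube-system --ignore-not-found --wait --timeout=180s
    # the next apply of the rendered manifests (out.yaml in this script) recreates the Job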
$kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-setup -n kube-system $kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-logging-buckets-setup -n logging @@ -62,14 +62,14 @@ $kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-tracing-bu | $kubectlbin apply -f - --server-side {{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} -$kubectlbin rollout status daemonset nginx-ingress-controller-external -n ingress-nginx --timeout=180s +$kubectlbin rollout status daemonset ingress-nginx-controller-external -n ingress-nginx --timeout=180s -$kubectlbin rollout status daemonset nginx-ingress-controller-internal -n ingress-nginx --timeout=180s +$kubectlbin rollout status daemonset ingress-nginx-controller-internal -n ingress-nginx --timeout=180s {{- end }} {{- if eq .spec.distribution.modules.ingress.nginx.type "single" }} -$kubectlbin rollout status daemonset nginx-ingress-controller -n ingress-nginx --timeout=180s +$kubectlbin rollout status daemonset ingress-nginx-controller -n ingress-nginx --timeout=180s {{- end }} diff --git a/templates/distribution/scripts/pre-apply.sh.tpl b/templates/distribution/scripts/pre-apply.sh.tpl index 3661d991c..6b41a562a 100644 --- a/templates/distribution/scripts/pre-apply.sh.tpl +++ b/templates/distribution/scripts/pre-apply.sh.tpl @@ -17,6 +17,21 @@ vendorPath="{{ .paths.vendorPath }}" # Text generated with: https://www.patorjk.com/software/taag/#p=display&f=ANSI%20Regular&t=TRACING%20TYPE +# ███ ██ ███████ ████████ ██ ██ ██████ ██████ ██ ██ ██████ ██████ ██ ██ ██████ ██ ███████ ███████ +# ████ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ +# ██ ██ ██ █████ ██ ██ █ ██ ██ ██ ██████ █████ ██████ ██ ██ ██ ██ ██ ██ █████ ███████ +# ██ ██ ██ ██ ██ ██ ███ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ +# ██ ████ ███████ ██ ███ ███ ██████ ██ ██ ██ ██ ██ ██████ ███████ ██ ██████ ██ ███████ ███████ + +{{- if index .reducers "distributionCommonNetworkPoliciesEnabled" }} + +{{- if eq .reducers.distributionCommonNetworkPoliciesEnabled.to false }} + $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/module + echo "KFD Network Policies deleted" +{{- end }} + +{{- end }} + # ██ ██████ ██████ ██████ ██ ███ ██ ██████ ████████ ██ ██ ██████ ███████ # ██ ██ ██ ██ ██ ██ ████ ██ ██ ██ ██ ██ ██ ██ ██ # ██ ██ ██ ██ ███ ██ ███ ██ ██ ██ ██ ██ ███ ██ ████ ██████ █████ @@ -29,8 +44,8 @@ deleteOpensearch() { $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n logging opensearch-dashboards $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n pomerium opensearch-dashboards - $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-dashboards > delete-opensearch.yaml - $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-triple >> delete-opensearch.yaml + $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-dashboards > delete-opensearch-dashboards.yaml + $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-triple > delete-opensearch.yaml {{- if eq .spec.distribution.modules.monitoring.type "none" }} if ! 
$kubectlbin get apiservice v1.monitoring.coreos.com; then @@ -41,6 +56,8 @@ deleteOpensearch() { $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-opensearch.yaml $kubectlbin delete --ignore-not-found -l app.kubernetes.io/name=opensearch pvc -n logging --wait --timeout=180s + $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-opensearch-dashboards.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-type=opensearch echo "OpenSearch resources deleted" } @@ -57,6 +74,7 @@ deleteLoki() { $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-loki.yaml $kubectlbin delete --ignore-not-found -l app.kubernetes.io/name=loki-distributed pvc -n logging --wait --timeout=180s + $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-type=loki echo "Loki resources deleted" } @@ -81,6 +99,7 @@ $kustomizebin build $vendorPath/modules/logging/katalog/minio-ha > delete-loggin fi {{- end }} $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-logging-minio-ha.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-backend=minio echo "Minio Logging deleted" } @@ -165,7 +184,7 @@ deleteGatekeeper() { $kustomizebin build $vendorPath/modules/opa/katalog/gatekeeper/monitoring | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f - {{- end }} $kustomizebin build $vendorPath/modules/opa/katalog/gatekeeper/core | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f - - + $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/policy-type=gatekeeper echo "Gatekeeper resources deleted" } @@ -173,6 +192,7 @@ deleteKyverno() { $kustomizebin build $vendorPath/modules/opa/katalog/kyverno | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f - $kubectlbin delete --ignore-not-found --wait --timeout=180s validatingwebhookconfiguration -l webhook.kyverno.io/managed-by=kyverno $kubectlbin delete --ignore-not-found --wait --timeout=180s mutatingwebhookconfiguration -l webhook.kyverno.io/managed-by=kyverno + $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/policy-type=kyverno echo "Kyverno resources deleted" } @@ -295,6 +315,7 @@ deleteTracingMinioHA() { fi {{- end }} $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-tracing-minio-ha.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/tracing-backend=minio echo "Minio HA on tracing namespace deleted" } @@ -441,6 +462,8 @@ deleteMimir() { $kustomizebin build $vendorPath/modules/monitoring/katalog/minio-ha | $kubectlbin delete --ignore-not-found --wait --timeout=360s -f - $kubectlbin delete -l app.kubernetes.io/name=mimir pvc -n monitoring --wait --timeout=360s $kubectlbin delete -l app=minio,release=minio-monitoring pvc -n monitoring --wait --timeout=360s + $kubectlbin delete ingress minio-monitoring -n monitoring --wait --timeout=360s --ignore-not-found + $kubectlbin delete ingress minio-monitoring -n pomerium --wait --timeout=360s --ignore-not-found echo "Mimir resources deleted" } @@ -519,6 +542,7 @@ deleteMimir() { # ██ ██ ██ ██ ██ ██ ██ ██ ██████ ██ ██ ██████ ██ ██ ███████ ██ ████ ██████ + {{- if index .reducers "distributionModulesMonitoringMimirBackend" }} deleteMimirMinioHA() { @@ -555,8 +579,8 @@ deleteNginx() { 
$kustomizebin build $vendorPath/modules/ingress/katalog/nginx > delete-nginx.yaml $kustomizebin build $vendorPath/modules/ingress/katalog/dual-nginx > delete-dual-nginx.yaml - $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/public > delete-external-dns.yaml - $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/private >> delete-external-dns.yaml + $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/public > delete-external-dns-public.yaml + $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/private > delete-external-dns-private.yaml $kustomizebin build $vendorPath/modules/ingress/katalog/forecastle > delete-forecastle.yaml {{- if eq .spec.distribution.modules.monitoring.type "none" }} @@ -565,13 +589,16 @@ deleteNginx() { cp delete-nginx-filtered.yaml delete-nginx.yaml cat delete-dual-nginx.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-dual-nginx-filtered.yaml cp delete-dual-nginx-filtered.yaml delete-dual-nginx.yaml - cat delete-external-dns.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-filtered.yaml - cp delete-external-dns-filtered.yaml delete-external-dns.yaml + cat delete-external-dns-public.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-public-filtered.yaml + cp delete-external-dns-public-filtered.yaml delete-external-dns-public.yaml + cat delete-external-dns-private.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-private-filtered.yaml + cp delete-external-dns-private-filtered.yaml delete-external-dns-private.yaml cat delete-forecastle.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-forecastle-filtered.yaml cp delete-forecastle-filtered.yaml delete-forecastle.yaml fi {{- end }} - $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns-public.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns-private.yaml $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-forecastle.yaml $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-dual-nginx.yaml $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-nginx.yaml @@ -586,6 +613,7 @@ deleteNginxIngresses() { $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n logging --all $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n gatekeeper-system --all $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n ingress-nginx --all + $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n kube-system --all # hubble, gangplank, dex echo "All the infrastructural ingresses associated with nginx have been deleted" } @@ -665,39 +693,50 @@ deleteInfraIngresses() { $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n logging --all $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n gatekeeper-system --all $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n ingress-nginx --all + $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n kube-system --all # hubble, dex, gangplank echo "All the infrastructural ingresses have been deleted" } {{- if eq .reducers.distributionModulesAuthProviderType.to "none" }} - +# Disable Auth +echo "Deleting Auth module resources" deleteDex 
deleteGangplank deletePomeriumIngresses deletePomerium - +echo "All Auth module resources have been deleted" {{- end }} -{{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }} - -{{- if eq .reducers.distributionModulesAuthProviderType.from "basicAuth" }} +{{- if eq .reducers.distributionModulesAuthProviderType.from "sso" }} + {{- if eq .reducers.distributionModulesAuthProviderType.to "basicAuth" }} +echo "Running cleanup tasks for migrating Auth type from SSO to basicAuth..." deleteDex deleteGangplank +# delete infra ingresses in the pomerium namespace because they need to be recreated in the right namespace deletePomeriumIngresses deletePomerium +echo "Finished cleanup tasks for migrating Auth type from SSO to basicAuth." + {{- end }} {{- end }} +{{- if eq .reducers.distributionModulesAuthProviderType.from "none" }} + {{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }} + # we need to delete infra ingresses that are present in each namespace before switching to sso, because they will be recreated in the pomerium namespace. + deleteInfraIngresses + {{- end }} {{- end }} -{{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }} - {{- if eq .reducers.distributionModulesAuthProviderType.from "basicAuth" }} + {{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }} +echo "Running cleanup tasks for migrating Auth type from basicAuth to SSO..." deleteDex deleteGangplank +# delete infra ingresses because they need to be recreated in the pomerium namespace deleteInfraIngresses deletePomerium -{{- end }} - +echo "Finished cleanup tasks for migrating Auth type from basicAuth to SSO." + {{- end }} {{- end }} {{- end }} # end distributionModulesAuthProviderType diff --git a/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl b/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl index d5fd1c371..c03abb66b 100644 --- a/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl +++ b/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl @@ -32,6 +32,7 @@ cluster_service_ipv4_cidr = null cluster_service_ipv4_cidr = {{ .spec.kubernetes.serviceIpV4Cidr | quote }} {{- end }} node_pools_launch_kind = {{ .spec.kubernetes.nodePoolsLaunchKind | quote }} +node_pools_global_ami_type = {{ .spec.kubernetes.nodePoolGlobalAmiType | quote }} {{- if hasKeyAny .spec.kubernetes "logRetentionDays" }} cluster_log_retention_days = {{ .spec.kubernetes.logRetentionDays }} @@ -97,7 +98,11 @@ workers_iam_role_name_prefix_override = {{ .spec.kubernetes.workersIAMRoleNamePr {{- end}} {{- if hasKeyAny $np "ami" }} - {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_id" $np.ami.id) }} + {{- if and (eq $np.type "self-managed") (hasKeyAny $np.ami "id") (not (hasKeyAny $np.ami "type")) }} + {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_id" $np.ami.id "ami_owners" (list $np.ami.owner)) }} + {{- else if and (hasKeyAny $np.ami "type") (not (hasKeyAny $np.ami "id")) }} + {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_type" $np.ami.type) }} + {{- end }} {{- end }} {{- if hasKeyAny $np.instance "spot" }} diff --git a/templates/kubernetes/ekscluster/terraform/main.tf.tpl b/templates/kubernetes/ekscluster/terraform/main.tf.tpl index 5932b4169..615ca5b50 100644 --- a/templates/kubernetes/ekscluster/terraform/main.tf.tpl +++ b/templates/kubernetes/ekscluster/terraform/main.tf.tpl @@ -62,6 +62,7 @@ module "fury" { ssh_public_key = var.ssh_public_key node_pools = var.node_pools node_pools_launch_kind = var.node_pools_launch_kind
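To make the AMI branching in the main.auto.tfvars.tpl hunk above concrete, here is a hypothetical furyctl.yaml sketch of the two mutually exclusive shapes the template accepts (pool names and AMI ID are invented; the alinux2023 token mirrors the global nodePoolGlobalAmiType values, and the exact per-pool ami.type vocabulary comes from the node pool schema):

    nodePools:
      - name: pinned-workers
        type: self-managed
        ami:
          id: ami-0123456789abcdef0   # rendered as ami_id + ami_owners
          owner: "123456789012"
      - name: managed-workers
        type: eks-managed
        ami:
          type: alinux2023            # rendered as ami_type; id/owner must be omitted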
+ node_pools_global_ami_type = var.node_pools_global_ami_type tags = var.tags cluster_iam_role_name = var.cluster_iam_role_name_prefix_override workers_role_name = var.workers_iam_role_name_prefix_override diff --git a/templates/kubernetes/ekscluster/terraform/variables.tf b/templates/kubernetes/ekscluster/terraform/variables.tf index 30dc3547c..993e88ef0 100644 --- a/templates/kubernetes/ekscluster/terraform/variables.tf +++ b/templates/kubernetes/ekscluster/terraform/variables.tf @@ -63,19 +63,21 @@ variable "ssh_public_key" { variable "node_pools" { description = "An object list defining node pools configurations" type = list(object({ - name = string type = optional(string, "self-managed") # "eks-managed" or "self-managed" + name = string ami_id = optional(string) - version = optional(string) # null to use cluster_version + ami_owners = optional(list(string), ["amazon"]) + ami_type = optional(string, null) + version = optional(string, null) # null to use cluster_version min_size = number max_size = number instance_type = string - container_runtime = optional(string) - spot_instance = optional(bool) - max_pods = optional(number) # null to use default upstream configuration + container_runtime = optional(string, "containerd") + spot_instance = optional(bool, false) + max_pods = optional(number, null) # null to use default upstream configuration volume_size = optional(number, 100) volume_type = optional(string, "gp2") - subnets = optional(list(string)) # null to use default upstream configuration + subnets = optional(list(string), null) # null to use default upstream configuration labels = optional(map(string)) taints = optional(list(string)) tags = optional(map(string)) @@ -219,3 +221,13 @@ variable "workers_iam_role_name_prefix_override" { type = string default = "" } + +variable "node_pools_global_ami_type" { + type = string + description = "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool." + default = "alinux2" + validation { + condition = contains(["alinux2", "alinux2023"], var.node_pools_global_ami_type) + error_message = "The global AMI type must be either 'alinux2' or 'alinux2023'." + } +} \ No newline at end of file diff --git a/templates/kubernetes/onpremises/98.cluster-certificates-renewal.yaml.tpl b/templates/kubernetes/onpremises/98.cluster-certificates-renewal.yaml.tpl index 919718197..26bcbb76d 100644 --- a/templates/kubernetes/onpremises/98.cluster-certificates-renewal.yaml.tpl +++ b/templates/kubernetes/onpremises/98.cluster-certificates-renewal.yaml.tpl @@ -29,7 +29,7 @@ serial: 1 tasks: - # Get Kubernetes version and modify the output from something like "v1.29.3" to "1.29". + # Get Kubernetes version and modify the output from something like "v1.29.4" to "1.29". 
- name: Get the current Kubernetes version shell: | K8S_VERSION=$(kubectl version --kubeconfig=/etc/kubernetes/admin.conf --short 2>/dev/null | grep 'Server Version:' | awk '{print $3}') diff --git a/templates/kubernetes/onpremises/hosts.yaml.tpl b/templates/kubernetes/onpremises/hosts.yaml.tpl index 4918e0a6f..191675e3a 100644 --- a/templates/kubernetes/onpremises/hosts.yaml.tpl +++ b/templates/kubernetes/onpremises/hosts.yaml.tpl @@ -49,6 +49,9 @@ all: {{- if index .spec.kubernetes.advanced.users "names" }} kubernetes_users_names: {{ .spec.kubernetes.advanced.users.names | toYaml | indent 10 }} + {{- end }} + {{- if index .spec.kubernetes.advanced.users "org" }} + kubernetes_users_org: "{{ .spec.kubernetes.advanced.users.org }}" {{- end }} {{- end }} diff --git a/tests/e2e-kfddistribution-upgrades.sh b/tests/e2e-kfddistribution-upgrades.sh index 97807018c..282cfef2b 100755 --- a/tests/e2e-kfddistribution-upgrades.sh +++ b/tests/e2e-kfddistribution-upgrades.sh @@ -6,21 +6,12 @@ set -e echo "----------------------------------------------------------------------------" -echo "Executing furyctl for the initial setup" -/tmp/furyctl apply --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.0.yaml --outdir "$PWD" --disable-analytics +echo "Executing furyctl for the initial setup with 1.28.4" +/tmp/furyctl apply --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.4.yaml --outdir "$PWD" --disable-analytics echo "----------------------------------------------------------------------------" -echo "Executing upgrade to an intermediate version" -/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.1.yaml --outdir "$PWD" --force upgrades --disable-analytics - -echo "----------------------------------------------------------------------------" -echo "Executing upgrade to the next version" -/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.2.yaml --outdir "$PWD" --force upgrades --disable-analytics - -echo "----------------------------------------------------------------------------" -echo "Executing upgrade to the next version" -/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.3.yaml --outdir "$PWD" --force upgrades --disable-analytics - -echo "----------------------------------------------------------------------------" -echo "Executing upgrade to the latest version" -/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.4.yaml --outdir "$PWD" --distro-location ./ --force upgrades --disable-analytics +echo "Executing upgrade to 1.28.5" +# we set the switch date for Loki to "tomorrow". Note that the `-d` flag does not work on Darwin; use `-v +1d` instead.
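A sketch of the portable one-liners for computing tomorrow's date, assuming GNU date on Linux and BSD date on macOS:

    date -I -d '+1 day'      # GNU date (Linux): prints YYYY-MM-DD
    date -v +1d +%Y-%m-%d    # BSD date (Darwin): same output format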
+# this is needed only when upgrading from 1.28.4 to 1.28.5 (and equivalent versions) +yq -i ".spec.distribution.modules.logging.loki.tsdbStartDate=\"$(date -I -d '+1 day')\"" tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.5.yaml +/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.5.yaml --outdir "$PWD" --distro-location ./ --force upgrades --disable-analytics diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.0.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.0.yaml deleted file mode 100644 index 239916ee2..000000000 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.0.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - ---- -apiVersion: kfd.sighup.io/v1alpha2 -kind: KFDDistribution -metadata: - name: sighup -spec: - distributionVersion: v1.28.0 - # This section describes how the KFD distribution will be installed - distribution: - kubeconfig: "{env://KUBECONFIG}" - # This common configuration will be applied to all the packages that will be installed in the cluster - common: {} - # This section contains all the configurations for all the KFD core modules - modules: - networking: - type: calico - # This section contains all the configurations for the ingress module - ingress: - baseDomain: fury.sighup.cc - nginx: - type: single - tls: - provider: certManager - certManager: - clusterIssuer: - name: letsencrypt-fury - email: sighup@sighup.cc - type: http01 - logging: - type: loki - minio: - storageSize: 20Gi - rootUser: - username: sighup - password: secretpassword1 - monitoring: - type: prometheus - prometheus: - resources: - requests: - cpu: 10m - limits: - cpu: 2000m - memory: 6Gi - tracing: - type: none - policy: - type: kyverno - kyverno: - additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: enforce - installDefaultPolicies: true - dr: - type: on-premises - velero: {} - auth: - provider: - type: basicAuth - basicAuth: - username: test - password: testpassword - # patches for kind compatibility and resource setting - customPatches: - patchesStrategicMerge: - - | - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: minio-logging - namespace: logging - spec: - template: - spec: - containers: - - name: minio - resources: - requests: - cpu: 10m - memory: 50Mi - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-common - namespace: logging - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-etcd - namespace: logging - - | - $patch: delete - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: x509-certificate-exporter-control-plane - namespace: monitoring diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.1.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.1.yaml deleted file mode 100644 index 2eaa2a346..000000000 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.1.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
- ---- -apiVersion: kfd.sighup.io/v1alpha2 -kind: KFDDistribution -metadata: - name: sighup -spec: - distributionVersion: v1.28.1 - # This section describes how the KFD distribution will be installed - distribution: - kubeconfig: "{env://KUBECONFIG}" - # This common configuration will be applied to all the packages that will be installed in the cluster - common: {} - # This section contains all the configurations for all the KFD core modules - modules: - networking: - type: calico - # This section contains all the configurations for the ingress module - ingress: - baseDomain: fury.sighup.cc - nginx: - type: single - tls: - provider: certManager - certManager: - clusterIssuer: - name: letsencrypt-fury - email: sighup@sighup.cc - type: http01 - logging: - type: loki - minio: - storageSize: 20Gi - rootUser: - username: sighup - password: secretpassword1 - monitoring: - type: prometheus - prometheus: - resources: - requests: - cpu: 10m - limits: - cpu: 2000m - memory: 6Gi - tracing: - type: none - policy: - type: kyverno - kyverno: - additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: enforce - installDefaultPolicies: true - dr: - type: on-premises - velero: {} - auth: - provider: - type: basicAuth - basicAuth: - username: test - password: testpassword - # patches for kind compatibility and resource setting - customPatches: - patchesStrategicMerge: - - | - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: minio-logging - namespace: logging - spec: - template: - spec: - containers: - - name: minio - resources: - requests: - cpu: 10m - memory: 50Mi - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-common - namespace: logging - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-etcd - namespace: logging - - | - $patch: delete - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: x509-certificate-exporter-control-plane - namespace: monitoring diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.2.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.2.yaml deleted file mode 100644 index f5762ca06..000000000 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.2.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
- ---- -apiVersion: kfd.sighup.io/v1alpha2 -kind: KFDDistribution -metadata: - name: sighup -spec: - distributionVersion: v1.28.2 - # This section describes how the KFD distribution will be installed - distribution: - kubeconfig: "{env://KUBECONFIG}" - # This common configuration will be applied to all the packages that will be installed in the cluster - common: {} - # This section contains all the configurations for all the KFD core modules - modules: - networking: - type: calico - # This section contains all the configurations for the ingress module - ingress: - baseDomain: fury.sighup.cc - nginx: - type: single - tls: - provider: certManager - certManager: - clusterIssuer: - name: letsencrypt-fury - email: sighup@sighup.cc - type: http01 - logging: - type: loki - minio: - storageSize: 20Gi - rootUser: - username: sighup - password: secretpassword1 - monitoring: - type: prometheus - prometheus: - resources: - requests: - cpu: 10m - limits: - cpu: 2000m - memory: 6Gi - tracing: - type: none - policy: - type: kyverno - kyverno: - additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: enforce - installDefaultPolicies: true - dr: - type: on-premises - velero: {} - auth: - provider: - type: basicAuth - basicAuth: - username: test - password: testpassword - # patches for kind compatibility and resource setting - customPatches: - patchesStrategicMerge: - - | - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: minio-logging - namespace: logging - spec: - template: - spec: - containers: - - name: minio - resources: - requests: - cpu: 10m - memory: 50Mi - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-common - namespace: logging - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-etcd - namespace: logging - - | - $patch: delete - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: x509-certificate-exporter-control-plane - namespace: monitoring diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.3.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.5.yaml similarity index 94% rename from tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.3.yaml rename to tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.5.yaml index 9616c8699..5bd2f5248 100644 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.3.yaml +++ b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.28.5.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -32,6 +32,8 @@ spec: type: http01 logging: type: loki + loki: + tsdbStartDate: "2024-11-28" # this should be a day in the future when upgrading minio: storageSize: 20Gi rootUser: @@ -52,7 +54,7 @@ spec: type: kyverno kyverno: additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: enforce + validationFailureAction: Enforce installDefaultPolicies: true dr: type: on-premises diff --git a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml index 9f1f1d4ed..bdf31ad58 100644 --- a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml +++ 
b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -34,6 +34,7 @@ spec: type: loki loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -69,7 +70,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: on-premises velero: diff --git a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml index 12d87e1f6..11110ab7a 100644 --- a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml +++ b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -34,6 +34,7 @@ spec: type: loki loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -69,7 +70,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: false - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: on-premises velero: diff --git a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml index ddf3cbc24..147846617 100644 --- a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml +++ b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -34,6 +34,7 @@ spec: type: loki loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -71,7 +72,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: false - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: on-premises velero: diff --git a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml index f19dc7cc4..3fd027cb7 100644 --- a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -34,6 +34,7 @@ spec: type: loki loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -69,7 +70,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: 
on-premises velero: diff --git a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml index 99c4a332c..e906c279a 100644 --- a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -34,6 +34,7 @@ spec: type: loki loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -69,7 +70,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: on-premises velero: diff --git a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml index 8799532da..80468eede 100644 --- a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -34,6 +34,7 @@ spec: type: loki loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -69,7 +70,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: none velero: diff --git a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml index aac19c5fe..d340f4773 100644 --- a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -69,7 +67,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: none velero: diff --git a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml index b869e79d5..eca5a8cdd 100644 --- a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -69,7 +67,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: 
true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: none velero: diff --git a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml index aa589f809..f9364efde 100644 --- a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml +++ b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -69,7 +67,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: none velero: diff --git a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml index 4b8bc790f..9dd6028e1 100644 --- a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -69,7 +67,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: none velero: diff --git a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml index b37329105..7740f268a 100644 --- a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -69,7 +67,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: true - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: none velero: diff --git a/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml b/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml index 558857359..16f352d3b 100644 --- a/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml +++ b/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: 
"{env://KUBECONFIG}" @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -44,3 +44,4 @@ spec: auth: provider: type: none + diff --git a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml index 400192ad0..526ebedc6 100644 --- a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml +++ b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -37,6 +37,9 @@ spec: rootUser: username: sighup password: secretpassword1 + loki: + backend: minio + tsdbStartDate: "2024-11-21" monitoring: type: mimir prometheus: @@ -62,7 +65,7 @@ spec: type: kyverno kyverno: additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: enforce + validationFailureAction: Enforce installDefaultPolicies: true dr: type: on-premises diff --git a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml index b3e51a65e..67a599b8e 100644 --- a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml +++ b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.28.3 + distributionVersion: v1.28.5 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" @@ -33,6 +33,7 @@ spec: logging: type: loki loki: + tsdbStartDate: "2024-11-21" backend: externalEndpoint externalEndpoint: endpoint: 192.168.1.100:9000 @@ -65,7 +66,7 @@ spec: kyverno: additionalExcludedNamespaces: ["local-path-storage"] installDefaultPolicies: false - validationFailureAction: enforce + validationFailureAction: Enforce dr: type: on-premises velero: @@ -116,4 +117,4 @@ spec: kind: DaemonSet metadata: name: x509-certificate-exporter-control-plane - namespace: monitoring + namespace: monitoring \ No newline at end of file diff --git a/tests/schema.sh b/tests/schema.sh index c1320c506..06f133371 100755 --- a/tests/schema.sh +++ b/tests/schema.sh @@ -29,7 +29,7 @@ test_schema() { yq "tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.yaml" -o json > "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json" validate() { - jv "schemas/${KIND}/${APIVER}.json" "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json" + jv "schemas/${KIND}/${APIVER}.json" "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json" 2>&1 } run validate @@ -63,8 +63,8 @@ test_schema() { expect() { expect_no "${1}" - local EXPECTED_ERROR_1="[S#/\$defs/Spec/else/properties/kubernetes/properties/vpcId/type] expected null, but got string" - local EXPECTED_ERROR_2="[S#/\$defs/Spec/else/properties/kubernetes/properties/subnetIds/type] expected null, but got array" + local EXPECTED_ERROR_1="at '/spec/kubernetes/vpcId': got string, want null" + local EXPECTED_ERROR_2="at '/spec/kubernetes/subnetIds': got array, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -92,7 +92,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec/then/properties/kubernetes/required] missing properties: 'vpcId', 'subnetIds'" + local 
EXPECTED_ERROR_1="at '/spec/kubernetes': missing properties 'vpcId', 'subnetIds'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -116,8 +116,8 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/0/else/properties/dex/type] expected null, but got object" - local EXPECTED_ERROR_2="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/0/else/properties/pomerium/type] expected null, but got object" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/auth/dex': got object, want null" + local EXPECTED_ERROR_2="at '/spec/distribution/modules/auth/pomerium': got object, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -145,7 +145,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/1/then/properties/provider/required] missing properties: 'basicAuth'" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/auth/provider': missing property 'basicAuth'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -169,7 +169,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution/else/properties/modules/properties/aws/type] expected null, but got object" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/aws': got object, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -193,8 +193,8 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS/then/required] missing properties: 'secret'" - local EXPECTED_ERROR_2="[S#/\$defs/Spec.Distribution/then/properties/modules/required] missing properties: 'aws'" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/ingress/nginx/tls': missing property 'secret'" + local EXPECTED_ERROR_2="at '/spec/distribution/modules': missing property 'aws'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -222,7 +222,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution/then/properties/modules/required] missing properties: 'aws'" + local EXPECTED_ERROR_1="at '/spec/distribution/modules': missing property 'aws'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -246,7 +246,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.CustomPatches.Patch/oneOf] valid against schemas at indexes 0 and 1" + local EXPECTED_ERROR_1="at '/spec/distribution/customPatches/patches/0': oneOf failed, subschemas 0, 1 matched" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -270,7 +270,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="additionalProperties 'type' not allowed" + local EXPECTED_ERROR_1="at '/spec/distribution/customPatches/configMapGenerator/0': additional properties 'type' not allowed" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -294,7 +294,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="expected null, but got string" + local EXPECTED_ERROR_1="at '/spec/infrastructure/vpn/vpcId': got string, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -318,7 +318,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="missing properties: 'vpcId'" + local EXPECTED_ERROR_1=" at '/spec/infrastructure/vpn': missing property 'vpcId'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 diff --git 
a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml index 70642084f..562c66dc4 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml @@ -23,21 +23,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -52,10 +52,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -90,9 +92,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml index a51a16d9b..6242d3fc3 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml @@ -22,21 +22,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -46,10 +46,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -109,9 +111,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml index 7d5f105b1..4ae4d5a2b 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml @@ -19,10 +19,12 @@ spec: kubernetes: nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large 
name: worker-eks @@ -57,9 +59,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml index 9be853284..a441afdec 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml @@ -29,10 +29,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -79,9 +81,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml index 1b38e7926..d1421afba 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml @@ -22,21 +22,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -46,10 +46,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -96,9 +98,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml index 3a5ce6254..5629057b5 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml @@ -22,21 +22,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething 
engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -46,10 +46,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -109,9 +111,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml index 1aa6224f9..aa75e4853 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -86,9 +88,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml index 9b5ea4412..4f0950e57 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -101,9 +103,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + 
additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml index 8c5154c4e..f770659ef 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -93,9 +95,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml index 0a7ab5b09..afdfeb7d1 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -95,9 +97,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml index d0687eb1a..f1b942728 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml @@ -11,99 +11,101 @@ # And the error "missing properties: 'aws'" is returned --- - apiVersion: 
kfd.sighup.io/v1alpha2 - kind: EKSCluster - metadata: - name: furyctl-dev-aws-al - spec: - infrastructure: - vpc: - network: - cidr: 10.0.0.0/16 - subnetsCidrs: - private: +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpc: + network: + cidr: 10.0.0.0/16 + subnetsCidrs: + private: - 10.0.182.0/24 - 10.0.172.0/24 - 10.0.162.0/24 - public: + public: - 10.0.20.0/24 - 10.0.30.0/24 - 10.0.40.0/24 - vpn: - ssh: - allowedFromCidrs: + vpn: + ssh: + allowedFromCidrs: - 0.0.0.0/0 - githubUsersName: + githubUsersName: - jnardiello - publicKeys: + publicKeys: - ssh-ed25519 SomethingSomething engineering@sighup.io - vpnClientsSubnetCidr: 192.168.200.0/24 - kubernetes: - apiServer: - privateAccess: true - privateAccessCidrs: ["0.0.0.0/0"] - publicAccessCidrs: [] - publicAccess: false - nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io - nodePoolsLaunchKind: both - nodePools: - - ami: - id: ami-01234567890123456 - owner: "123456789012" - instance: - type: t3.large - name: worker-eks - size: - max: 3 - min: 2 - distribution: - common: + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["0.0.0.0/0"] + publicAccessCidrs: [] + publicAccess: false + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: self-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + common: + provider: + type: eks + modules: + auth: provider: - type: eks - modules: - auth: - provider: - type: basicAuth - basicAuth: - password: admin - username: admin - dr: - type: eks - velero: - eks: - bucketName: example-velero - iamRoleArn: arn:aws:iam::123456789012:role/example-velero - region: eu-west-1 - ingress: - baseDomain: furyctl-demo.sighup.io - dns: - private: - create: true - name: internal.furyctl-demo.sighup.io - vpcId: "" - public: - create: true - name: furyctl-demo.sighup.io - nginx: - type: single - tls: - provider: secret - logging: - type: opensearch - opensearch: - type: single - policy: - type: gatekeeper - gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny - distributionVersion: v1.24.1 - region: eu-west-1 - toolsConfiguration: - terraform: - state: - s3: - bucketName: furyctl-test-eks - keyPrefix: furyctl-test + type: basicAuth + basicAuth: + password: admin + username: admin + dr: + type: eks + velero: + eks: + bucketName: example-velero + iamRoleArn: arn:aws:iam::123456789012:role/example-velero region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + vpcId: "" + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + tls: + provider: secret + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml index ab8e4c018..7d0d4e962 100644 --- 
a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -104,9 +106,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml index 3268afb94..ff0ed51dd 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml @@ -10,97 +10,99 @@ # Then an error "missing properties: 'aws'" is returned --- - apiVersion: kfd.sighup.io/v1alpha2 - kind: EKSCluster - metadata: - name: furyctl-dev-aws-al - spec: - infrastructure: - vpc: - network: - cidr: 10.0.0.0/16 - subnetsCidrs: - private: +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpc: + network: + cidr: 10.0.0.0/16 + subnetsCidrs: + private: - 10.0.182.0/24 - 10.0.172.0/24 - 10.0.162.0/24 - public: + public: - 10.0.20.0/24 - 10.0.30.0/24 - 10.0.40.0/24 - vpn: - ssh: - allowedFromCidrs: + vpn: + ssh: + allowedFromCidrs: - 0.0.0.0/0 - githubUsersName: + githubUsersName: - jnardiello - publicKeys: + publicKeys: - ssh-ed25519 SomethingSomething engineering@sighup.io - vpnClientsSubnetCidr: 192.168.200.0/24 - kubernetes: - apiServer: - privateAccess: true - privateAccessCidrs: ["0.0.0.0/0"] - publicAccessCidrs: [] - publicAccess: false - nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io - nodePoolsLaunchKind: both - nodePools: - - ami: - id: ami-01234567890123456 - owner: "123456789012" - instance: - type: t3.large - name: worker-eks - size: - max: 3 - min: 2 - distribution: - common: + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["0.0.0.0/0"] + publicAccessCidrs: [] + publicAccess: false + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: self-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + common: + provider: + type: eks + modules: + auth: provider: - type: eks - modules: - auth: - provider: - type: basicAuth - basicAuth: - password: admin - username: admin - dr: - type: eks - velero: - 
eks: - bucketName: example-velero - iamRoleArn: arn:aws:iam::123456789012:role/example-velero - region: eu-west-1 - ingress: - baseDomain: furyctl-demo.sighup.io - dns: - private: - create: true - name: internal.furyctl-demo.sighup.io - vpcId: "" - public: - create: true - name: furyctl-demo.sighup.io - nginx: - type: single - logging: - type: opensearch - opensearch: - type: single - policy: - type: gatekeeper - gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny - distributionVersion: v1.24.1 - region: eu-west-1 - toolsConfiguration: - terraform: - state: - s3: - bucketName: furyctl-test-eks - keyPrefix: furyctl-test + type: basicAuth + basicAuth: + password: admin + username: admin + dr: + type: eks + velero: + eks: + bucketName: example-velero + iamRoleArn: arn:aws:iam::123456789012:role/example-velero region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + vpcId: "" + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml index 67e898632..3c098542f 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -67,7 +69,7 @@ spec: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + overrides: {} dr: type: eks velero: @@ -115,9 +117,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml index 39a89d27e..d01de7030 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: 
private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -58,42 +60,42 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patch: | + - op: add + path: /metadata/labels + value: + label1: value1 + patchesStrategicMerge: + - | + --- + apiVersion: v1 kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml - patch: | - - op: add - path: /metadata/labels - value: + metadata: + labels: label1: value1 - patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -104,8 +106,8 @@ spec: ebsCsiDriver: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: - iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} dr: type: eks velero: @@ -153,9 +155,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml index 5585e4d7f..9cbd6ec62 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml @@ -20,21 +20,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 
192.168.200.0/24 kubernetes: apiServer: @@ -44,10 +44,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -57,37 +59,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -98,8 +100,8 @@ spec: ebsCsiDriver: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: - iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} dr: type: eks velero: @@ -147,9 +149,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml index 05ec928b5..f857d991c 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -58,39 +60,39 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - type: Opaque - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + 
type: Opaque + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env - type: Opaque + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env + type: Opaque common: provider: type: eks @@ -101,8 +103,8 @@ spec: ebsCsiDriver: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: - iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} dr: type: eks velero: @@ -150,9 +152,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml index 5585e4d7f..9cbd6ec62 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml @@ -20,21 +20,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -44,10 +44,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -57,37 +59,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + 
name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -98,8 +100,8 @@ spec: ebsCsiDriver: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: - iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} dr: type: eks velero: @@ -147,9 +149,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml index 8f2947334..1b35fe071 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml @@ -20,22 +20,22 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: vpcId: vpc-0123456789abcdef0 ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -58,37 +60,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -99,8 +101,8 @@ spec: ebsCsiDriver: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: - iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + 
overrides: {} dr: type: eks velero: @@ -136,9 +138,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml index 17fe3d959..192ec94da 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml @@ -20,21 +20,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -44,10 +44,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -57,37 +59,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -98,8 +100,8 @@ spec: ebsCsiDriver: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: - iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} dr: type: eks velero: @@ -147,9 +149,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml index efef6adb1..724b2849c 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml +++ 
b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml @@ -18,11 +18,11 @@ spec: vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -37,10 +37,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -50,37 +52,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -119,9 +121,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml index c85f202fe..ebc3c17c8 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml @@ -19,11 +19,11 @@ spec: vpcId: vpc-0123456789abcdef0 ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -38,10 +38,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -51,37 +53,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" 
+ version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -92,8 +94,8 @@ spec: ebsCsiDriver: iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver loadBalancerController: - iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller - overrides: { } + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} dr: type: eks velero: @@ -141,9 +143,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml new file mode 100644 index 000000000..8aab91a86 --- /dev/null +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml @@ -0,0 +1,135 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Tests the following cases: + +# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is filled +# When I validate the config against the schema +# Then an error "$ref/properties/nodePools/items/$ref/then/properties/ami/properties/id/type: expected null, but got string" is returned + +--- +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpn: + ssh: + allowedFromCidrs: + - 0.0.0.0/0 + githubUsersName: + - jnardiello + publicKeys: + - ssh-ed25519 SomethingSomething engineering@sighup.io + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["10.0.0.3/16"] + publicAccessCidrs: [] + publicAccess: false + vpcId: vpc-0123456789abcdef0 + subnetIds: + - subnet-0123456789abcdef0 + - subnet-0123456789abcdef1 + - subnet-0123456789abcdef2 + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: eks-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + customPatches: + configMapGenerator: + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env + patches: + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patchesStrategicMerge: + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system + secretGenerator: + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env 
+ common: + provider: + type: eks + modules: + aws: {} + dr: + type: eks + velero: + eks: + bucketName: example-velero + region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + tls: + provider: secret + secret: + ca: | + value + cert: | + value + key: | + value + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml new file mode 100644 index 000000000..f5787979d --- /dev/null +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml @@ -0,0 +1,157 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Tests the following cases: + +# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is not filled +# When I validate the config against the schema +# Then no errors are returned + +--- +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpn: + vpcId: vpc-0123456789abcdef0 + ssh: + allowedFromCidrs: + - 0.0.0.0/0 + githubUsersName: + - jnardiello + publicKeys: + - ssh-ed25519 SomethingSomething engineering@sighup.io + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["10.0.0.3/16"] + publicAccessCidrs: [] + publicAccess: false + vpcId: vpc-0123456789abcdef0 + subnetIds: + - subnet-0123456789abcdef0 + - subnet-0123456789abcdef1 + - subnet-0123456789abcdef2 + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: eks-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + customPatches: + configMapGenerator: + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env + patches: + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patchesStrategicMerge: + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system + secretGenerator: + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env + common: + provider: + type: eks + modules: + aws: + clusterAutoscaler: + iamRoleArn: arn:aws:iam::123456789012:role/cluster-autoscaler + ebsCsiDriver: + iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver + loadBalancerController: + iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} + dr: + type: eks + velero: + eks: + bucketName: example-velero + region: eu-west-1 + iamRoleArn: arn:aws:iam::123456789012:role/velero + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: 
internal.furyctl-demo.sighup.io + vpcId: vpc-12345678901234567 + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + tls: + provider: secret + secret: + ca: | + value + cert: | + value + key: | + value + certManager: + clusterIssuer: + name: letsencrypt-fury + email: email@test.it + type: http01 + route53: + region: eu-west-1 + hostedZoneId: Z1234567890 + iamRoleArn: arn:aws:iam::123456789012:role/cert-manager + externalDns: + privateIamRoleArn: arn:aws:iam::123456789012:role/external-dns-private + publicIamRoleArn: arn:aws:iam::123456789012:role/external-dns-public + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml index dccf16316..5593b9eea 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml @@ -23,21 +23,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -52,10 +52,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -88,9 +90,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml index 283074878..6dc006a1e 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml @@ -22,21 +22,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -46,10 +46,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: 
ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -100,9 +102,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml index bc6313d7b..87c241192 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml @@ -19,10 +19,12 @@ spec: kubernetes: nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -55,9 +57,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml index a866b71bf..d0488d417 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml @@ -29,10 +29,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -70,9 +72,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml index 2af19bdc5..6571a4e9e 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml @@ -22,21 +22,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -46,10 +46,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -94,9 +96,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: 
true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml index a350548cb..3676cf950 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml @@ -22,21 +22,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -46,10 +46,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -100,9 +102,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml index 92cae2ef7..c9015eed3 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -84,9 +86,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml index eaa51a27c..f532e7dd4 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 
10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -92,9 +94,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml index d16b0889b..b1cb81933 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -91,9 +93,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml index 7d1708738..b17f26041 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 
SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -86,9 +88,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml index c7db71bae..59ed50619 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml @@ -11,99 +11,101 @@ # And the error "missing properties: 'aws'" is returned --- - apiVersion: kfd.sighup.io/v1alpha2 - kind: EKSCluster - metadata: - name: furyctl-dev-aws-al - spec: - infrastructure: - vpc: - network: - cidr: 10.0.0.0/16 - subnetsCidrs: - private: +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpc: + network: + cidr: 10.0.0.0/16 + subnetsCidrs: + private: - 10.0.182.0/24 - 10.0.172.0/24 - 10.0.162.0/24 - public: + public: - 10.0.20.0/24 - 10.0.30.0/24 - 10.0.40.0/24 - vpn: - ssh: - allowedFromCidrs: + vpn: + ssh: + allowedFromCidrs: - 0.0.0.0/0 - githubUsersName: + githubUsersName: - jnardiello - publicKeys: + publicKeys: - ssh-ed25519 SomethingSomething engineering@sighup.io - vpnClientsSubnetCidr: 192.168.200.0/24 - kubernetes: - apiServer: - privateAccess: true - privateAccessCidrs: ["0.0.0.0/0"] - publicAccessCidrs: [] - publicAccess: false - nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io - nodePoolsLaunchKind: both - nodePools: - - ami: - id: ami-01234567890123456 - owner: "123456789012" - instance: - type: t3.large - name: worker-eks - size: - max: 3 - min: 2 - distribution: - common: + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["0.0.0.0/0"] + publicAccessCidrs: [] + publicAccess: false + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: self-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + common: + provider: + type: eks + modules: + auth: provider: - type: eks - modules: - auth: - provider: - type: basicAuth - basicAuth: - password: admin - username: admin - dr: - type: eks - velero: - eks: - bucketName: example-velero - iamRoleArn: arn:aws:iam::123456789012:role/example-velero - region: eu-west-1 - ingress: - baseDomain: furyctl-demo.sighup.io - dns: - private: - create: true - name: internal.furyctl-demo.sighup.io - vpcId: "" - public: - create: true - name: furyctl-demo.sighup.io - nginx: - type: single - tls: - provider: secret - logging: - type: opensearch - opensearch: - type: single - policy: - type: gatekeeper - gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny - distributionVersion: v1.24.1 - region: eu-west-1 - toolsConfiguration: - terraform: - state: - s3: - bucketName: furyctl-test-eks - keyPrefix: furyctl-test + type: basicAuth + basicAuth: + password: admin + username: admin + dr: + type: eks 
+ velero: + eks: + bucketName: example-velero + iamRoleArn: arn:aws:iam::123456789012:role/example-velero region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + vpcId: "" + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + tls: + provider: secret + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml index 6c9404d12..aadc3b4ba 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -90,9 +92,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml index 3268afb94..ff0ed51dd 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml @@ -10,97 +10,99 @@ # Then an error "missing properties: 'aws'" is returned --- - apiVersion: kfd.sighup.io/v1alpha2 - kind: EKSCluster - metadata: - name: furyctl-dev-aws-al - spec: - infrastructure: - vpc: - network: - cidr: 10.0.0.0/16 - subnetsCidrs: - private: +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpc: + network: + cidr: 10.0.0.0/16 + subnetsCidrs: + private: - 10.0.182.0/24 - 10.0.172.0/24 - 10.0.162.0/24 - public: + public: - 10.0.20.0/24 - 10.0.30.0/24 - 10.0.40.0/24 - vpn: - ssh: - allowedFromCidrs: + vpn: + ssh: + allowedFromCidrs: - 0.0.0.0/0 - githubUsersName: + githubUsersName: - jnardiello - publicKeys: + publicKeys: - ssh-ed25519 SomethingSomething engineering@sighup.io - vpnClientsSubnetCidr: 192.168.200.0/24 - kubernetes: - apiServer: - privateAccess: true - privateAccessCidrs: ["0.0.0.0/0"] - publicAccessCidrs: [] - publicAccess: false - nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io - 
nodePoolsLaunchKind: both - nodePools: - - ami: - id: ami-01234567890123456 - owner: "123456789012" - instance: - type: t3.large - name: worker-eks - size: - max: 3 - min: 2 - distribution: - common: + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["0.0.0.0/0"] + publicAccessCidrs: [] + publicAccess: false + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: self-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + common: + provider: + type: eks + modules: + auth: provider: - type: eks - modules: - auth: - provider: - type: basicAuth - basicAuth: - password: admin - username: admin - dr: - type: eks - velero: - eks: - bucketName: example-velero - iamRoleArn: arn:aws:iam::123456789012:role/example-velero - region: eu-west-1 - ingress: - baseDomain: furyctl-demo.sighup.io - dns: - private: - create: true - name: internal.furyctl-demo.sighup.io - vpcId: "" - public: - create: true - name: furyctl-demo.sighup.io - nginx: - type: single - logging: - type: opensearch - opensearch: - type: single - policy: - type: gatekeeper - gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny - distributionVersion: v1.24.1 - region: eu-west-1 - toolsConfiguration: - terraform: - state: - s3: - bucketName: furyctl-test-eks - keyPrefix: furyctl-test + type: basicAuth + basicAuth: + password: admin + username: admin + dr: + type: eks + velero: + eks: + bucketName: example-velero + iamRoleArn: arn:aws:iam::123456789012:role/example-velero region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + vpcId: "" + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml index 1833dbc70..7c7798b76 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks 
@@ -94,9 +96,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml index 2b1d7ce27..5b5bb45f6 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -58,42 +60,42 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patch: | + - op: add + path: /metadata/labels + value: + label1: value1 + patchesStrategicMerge: + - | + --- + apiVersion: v1 kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml - patch: | - - op: add - path: /metadata/labels - value: + metadata: + labels: label1: value1 - patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -132,9 +134,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml index 431e31a7f..4376557a4 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml @@ -20,21 +20,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 
10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -44,10 +44,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -57,37 +59,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -126,9 +128,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml index 45b77773f..7bc5e19aa 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml @@ -21,21 +21,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -58,39 +60,39 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - type: Opaque - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + 
- /path/to/config.example + type: Opaque + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env - type: Opaque + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env + type: Opaque common: provider: type: eks @@ -129,9 +131,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml index b24fec63b..242de8a65 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml @@ -20,21 +20,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -44,10 +44,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -57,38 +59,38 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env - type: Opaque + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env 
+ type: Opaque common: provider: type: eks @@ -127,9 +129,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml index 713374ed3..29cf21dbc 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml @@ -20,22 +20,22 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 10.0.30.0/24 + - 10.0.40.0/24 vpn: vpcId: vpc-0123456789abcdef0 ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -45,10 +45,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -58,37 +60,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -127,9 +129,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml index ab81e02e3..ac7701cfe 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml @@ -20,21 +20,21 @@ spec: cidr: 10.0.0.0/16 subnetsCidrs: private: - - 10.0.182.0/24 - - 10.0.172.0/24 - - 10.0.162.0/24 + - 10.0.182.0/24 + - 10.0.172.0/24 + - 10.0.162.0/24 public: - - 10.0.20.0/24 - - 10.0.30.0/24 - - 10.0.40.0/24 + - 10.0.20.0/24 + - 
10.0.30.0/24 + - 10.0.40.0/24 vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -44,10 +44,12 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -57,37 +59,37 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks @@ -126,9 +128,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml index e83816630..724b2849c 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml @@ -18,11 +18,11 @@ spec: vpn: ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -37,10 +37,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -50,42 +52,42 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: 
/path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks modules: - aws: { } + aws: {} dr: type: eks velero: @@ -119,9 +121,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml index d6ae66433..0c0d66952 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml @@ -19,11 +19,11 @@ spec: vpcId: vpc-0123456789abcdef0 ssh: allowedFromCidrs: - - 0.0.0.0/0 + - 0.0.0.0/0 githubUsersName: - - jnardiello + - jnardiello publicKeys: - - ssh-ed25519 SomethingSomething engineering@sighup.io + - ssh-ed25519 SomethingSomething engineering@sighup.io vpnClientsSubnetCidr: 192.168.200.0/24 kubernetes: apiServer: @@ -38,10 +38,12 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks @@ -51,42 +53,42 @@ spec: distribution: customPatches: configMapGenerator: - - name: a-configmap - files: - - /path/to/config.example - - name: b-configmap - envs: - - /path/to/envs.env + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env patches: - - target: - group: "" - version: v1 - kind: Service - name: cluster-autoscaler - namespace: kube-system - path: /path/to/patch.yaml + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml patchesStrategicMerge: - - | - --- - apiVersion: v1 - kind: Service - metadata: - labels: - label1: value1 - name: cluster-autoscaler - namespace: kube-system + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system secretGenerator: - - name: a-secret - files: - - /path/to/config.example - - name: b-secret - envs: - - /path/to/envs.env + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env common: provider: type: eks modules: - aws: { } + aws: {} dr: type: eks velero: @@ -120,9 +122,9 @@ spec: policy: type: gatekeeper gatekeeper: - additionalExcludedNamespaces: [] - installDefaultPolicies: true - enforcementAction: deny + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny distributionVersion: v1.24.1 region: eu-west-1 toolsConfiguration: diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml new file mode 100644 index 000000000..8aab91a86 
--- /dev/null
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml
@@ -0,0 +1,135 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Tests the following cases:
+
+# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is filled
+# When I validate the config against the schema
+# Then an error "$ref/properties/nodePools/items/$ref/then/properties/ami/properties/id/type: expected null, but got string" is returned
+
+---
+apiVersion: kfd.sighup.io/v1alpha2
+kind: EKSCluster
+metadata:
+  name: furyctl-dev-aws-al
+spec:
+  infrastructure:
+    vpn:
+      ssh:
+        allowedFromCidrs:
+          - 0.0.0.0/0
+        githubUsersName:
+          - jnardiello
+        publicKeys:
+          - ssh-ed25519 SomethingSomething engineering@sighup.io
+      vpnClientsSubnetCidr: 192.168.200.0/24
+  kubernetes:
+    apiServer:
+      privateAccess: true
+      privateAccessCidrs: ["10.0.0.3/16"]
+      publicAccessCidrs: []
+      publicAccess: false
+    vpcId: vpc-0123456789abcdef0
+    subnetIds:
+      - subnet-0123456789abcdef0
+      - subnet-0123456789abcdef1
+      - subnet-0123456789abcdef2
+    nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
+    nodePoolsLaunchKind: both
+    nodePoolGlobalAmiType: "alinux2"
+    nodePools:
+      - ami:
+          id: ami-01234567890123456
+          owner: "123456789012"
+        type: eks-managed
+        instance:
+          type: t3.large
+        name: worker-eks
+        size:
+          max: 3
+          min: 2
+  distribution:
+    customPatches:
+      configMapGenerator:
+        - name: a-configmap
+          files:
+            - /path/to/config.example
+        - name: b-configmap
+          envs:
+            - /path/to/envs.env
+      patches:
+        - target:
+            group: ""
+            version: v1
+            kind: Service
+            name: cluster-autoscaler
+            namespace: kube-system
+          path: /path/to/patch.yaml
+      patchesStrategicMerge:
+        - |
+          ---
+          apiVersion: v1
+          kind: Service
+          metadata:
+            labels:
+              label1: value1
+            name: cluster-autoscaler
+            namespace: kube-system
+      secretGenerator:
+        - name: a-secret
+          files:
+            - /path/to/config.example
+        - name: b-secret
+          envs:
+            - /path/to/envs.env
+    common:
+      provider:
+        type: eks
+    modules:
+      aws: {}
+      dr:
+        type: eks
+        velero:
+          eks:
+            bucketName: example-velero
+            region: eu-west-1
+      ingress:
+        baseDomain: furyctl-demo.sighup.io
+        dns:
+          private:
+            create: true
+            name: internal.furyctl-demo.sighup.io
+          public:
+            create: true
+            name: furyctl-demo.sighup.io
+        nginx:
+          type: single
+          tls:
+            provider: secret
+            secret:
+              ca: |
+                value
+              cert: |
+                value
+              key: |
+                value
+      logging:
+        type: opensearch
+        opensearch:
+          type: single
+      policy:
+        type: gatekeeper
+        gatekeeper:
+          additionalExcludedNamespaces: []
+          installDefaultPolicies: true
+          enforcementAction: deny
+  distributionVersion: v1.24.1
+  region: eu-west-1
+  toolsConfiguration:
+    terraform:
+      state:
+        s3:
+          bucketName: furyctl-test-eks
+          keyPrefix: furyctl-test
+          region: eu-west-1
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml
new file mode 100644
index 000000000..5b6353984
--- /dev/null
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml
@@ -0,0 +1,135 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Tests the following cases:
+
+# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is not filled
+# When I validate the config against the schema
+# Then no errors are returned
+
+---
+apiVersion: kfd.sighup.io/v1alpha2
+kind: EKSCluster
+metadata:
+  name: furyctl-dev-aws-al
+spec:
+  infrastructure:
+    vpn:
+      ssh:
+        allowedFromCidrs:
+          - 0.0.0.0/0
+        githubUsersName:
+          - jnardiello
+        publicKeys:
+          - ssh-ed25519 SomethingSomething engineering@sighup.io
+      vpnClientsSubnetCidr: 192.168.200.0/24
+  kubernetes:
+    apiServer:
+      privateAccess: true
+      privateAccessCidrs: ["10.0.0.3/16"]
+      publicAccessCidrs: []
+      publicAccess: false
+    vpcId: vpc-0123456789abcdef0
+    subnetIds:
+      - subnet-0123456789abcdef0
+      - subnet-0123456789abcdef1
+      - subnet-0123456789abcdef2
+    nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
+    nodePoolsLaunchKind: both
+    nodePoolGlobalAmiType: "alinux2"
+    nodePools:
+      - ami:
+          id: ami-01234567890123456
+          owner: "123456789012"
+        type: eks-managed
+        instance:
+          type: t3.large
+        name: worker-eks
+        size:
+          max: 3
+          min: 2
+  distribution:
+    customPatches:
+      configMapGenerator:
+        - name: a-configmap
+          files:
+            - /path/to/config.example
+        - name: b-configmap
+          envs:
+            - /path/to/envs.env
+      patches:
+        - target:
+            group: ""
+            version: v1
+            kind: Service
+            name: cluster-autoscaler
+            namespace: kube-system
+          path: /path/to/patch.yaml
+      patchesStrategicMerge:
+        - |
+          ---
+          apiVersion: v1
+          kind: Service
+          metadata:
+            labels:
+              label1: value1
+            name: cluster-autoscaler
+            namespace: kube-system
+      secretGenerator:
+        - name: a-secret
+          files:
+            - /path/to/config.example
+        - name: b-secret
+          envs:
+            - /path/to/envs.env
+    common:
+      provider:
+        type: eks
+    modules:
+      aws: {}
+      dr:
+        type: eks
+        velero:
+          eks:
+            bucketName: example-velero
+            region: eu-west-1
+      ingress:
+        baseDomain: furyctl-demo.sighup.io
+        dns:
+          private:
+            create: true
+            name: internal.furyctl-demo.sighup.io
+          public:
+            create: true
+            name: furyctl-demo.sighup.io
+        nginx:
+          type: single
+          tls:
+            provider: secret
+            secret:
+              ca: |
+                value
+              cert: |
+                value
+              key: |
+                value
+      logging:
+        type: opensearch
+        opensearch:
+          type: single
+      policy:
+        type: gatekeeper
+        gatekeeper:
+          additionalExcludedNamespaces: []
+          installDefaultPolicies: true
+          enforcementAction: deny
+  distributionVersion: v1.24.1
+  region: eu-west-1
+  toolsConfiguration:
+    terraform:
+      state:
+        s3:
+          bucketName: furyctl-test-eks
+          keyPrefix: furyctl-test
+          region: eu-west-1

From 22e54b10551e8cbe235dcc3214e6427732f0b667 Mon Sep 17 00:00:00 2001
From: Samuele Chiocca
Date: Thu, 28 Nov 2024 15:55:48 +0100
Subject: [PATCH 2/4] docs: update compatibility matrix

---
 docs/COMPATIBILITY_MATRIX.md | 41 ++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/docs/COMPATIBILITY_MATRIX.md b/docs/COMPATIBILITY_MATRIX.md
index 7fd39429c..060e42031 100644
--- a/docs/COMPATIBILITY_MATRIX.md
+++ b/docs/COMPATIBILITY_MATRIX.md
@@ -8,22 +8,31 @@ For a complete list of all KFD releases and their compatibility with Kubernetes

 ℹ️ **Use the latest patch release for your desired version whenever it's possible**. See [the versioning file](VERSIONING.md) for more information.

-| KFD / Kubernetes Version | v1.28.X | v1.27.X |
-| ----------------------------------------------------------------------------- | ------------------ | ------------------ |
-| [v1.28.4](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.4) | :white_check_mark: | |
-| [v1.28.3](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.3) | :white_check_mark: | |
-| [v1.28.2](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.2) | :white_check_mark: | |
-| [v1.28.1](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.1) | :white_check_mark: | |
-| [v1.28.0](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.0) | :white_check_mark: | |
-| [v1.27.8](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.8) | | :white_check_mark: |
-| [v1.27.7](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.7) | | :white_check_mark: |
-| [v1.27.6](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.6) | | :white_check_mark: |
-| [v1.27.5](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.5) | | :white_check_mark: |
-| [v1.27.4](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.4) | | :white_check_mark: |
-| [v1.27.3](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.3) | | :white_check_mark: |
-| [v1.27.2](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.2) | | :white_check_mark: |
-| [v1.27.1](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.1) | | :white_check_mark: |
-| [v1.27.0](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.0) | | :white_check_mark: |
+| KFD / Kubernetes Version | v1.30.X | v1.29.X | v1.28.X | v1.27.X |
+| ----------------------------------------------------------------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
+| [v1.30.0](https://github.com/sighupio/fury-distribution/releases/tag/v1.30.0) | :white_check_mark: | | | |
+| [v1.29.5](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.5) | | :white_check_mark: | | |
+| [v1.29.4](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.4) | | :white_check_mark: | | |
+| [v1.29.3](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.3) | | :white_check_mark: | | |
+| [v1.29.2](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.2) | | :white_check_mark: | | |
+| [v1.29.1](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.1) | | :white_check_mark: | | |
+| [v1.29.0](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.0) | | :white_check_mark: | | |
+| [v1.28.5](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.5) | | | :white_check_mark: | |
+| [v1.28.4](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.4) | | | :white_check_mark: | |
+| [v1.28.3](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.3) | | | :white_check_mark: | |
+| [v1.28.2](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.2) | | | :white_check_mark: | |
+| [v1.28.1](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.1) | | | :white_check_mark: | |
+| [v1.28.0](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.0) | | | :white_check_mark: | |
+| [v1.27.9](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.9) | | | | :white_check_mark: |
+| [v1.27.8](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.8) | | | | :white_check_mark: |
+| [v1.27.7](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.7) | | | | :white_check_mark: |
+| [v1.27.6](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.6) | | | | :white_check_mark: |
+| [v1.27.5](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.5) | | | | :white_check_mark: |
+| [v1.27.4](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.4) | | | | :white_check_mark: |
+| [v1.27.3](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.3) | | | | :white_check_mark: |
+| [v1.27.2](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.2) | | | | :white_check_mark: |
+| [v1.27.1](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.1) | | | | :white_check_mark: |
+| [v1.27.0](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.0) | | | | :white_check_mark: |

 | Legend | Meaning |
 | :----------------: | ---------------- |

From 2e0c2f5fc56317a5a8ec83f65f9abc8af5d6c3b6 Mon Sep 17 00:00:00 2001
From: Samuele Chiocca
Date: Thu, 28 Nov 2024 16:01:15 +0100
Subject: [PATCH 3/4] docs: align v1.28.5 release with correct versions

---
 docs/releases/v1.28.5.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/releases/v1.28.5.md b/docs/releases/v1.28.5.md
index 34981617e..40b409f0d 100644
--- a/docs/releases/v1.28.5.md
+++ b/docs/releases/v1.28.5.md
@@ -1,10 +1,10 @@
-# Kubernetes Fury Distribution Release v1.29.5
+# Kubernetes Fury Distribution Release v1.28.5

-Welcome to KFD release `v1.29.5`. This patch release also updates Kubernetes from 1.29.3 to 1.29.10 on the OnPremises provider.
+Welcome to KFD release `v1.28.5`. This patch release also updates Kubernetes from 1.28.7 to 1.28.15 on the OnPremises provider.

 The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.io/).

-## New Features since `v1.29.4`
+## New Features since `v1.28.4`

 ### Installer Updates

From e8c68cb4d748e847a506a44e1c0b1fb9152d6528 Mon Sep 17 00:00:00 2001
From: Ramiro Algozino
Date: Fri, 29 Nov 2024 11:40:46 +0100
Subject: [PATCH 4/4] chore(e2e): bump furyctl to 0.30.1

---
 .drone.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.drone.yml b/.drone.yml
index 01e57d36a..ae43a736a 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -80,7 +80,7 @@ steps:
     environment:
       NETRC_FILE:
         from_secret: NETRC_FILE
-      FURYCTL_VERSION: v0.30.0
+      FURYCTL_VERSION: v0.30.1
       FURYCTL_CONFIG: tests/e2e/kfddistribution/furyctl-init-cluster.yaml
       FURYCTL_DISTRO_LOCATION: ./
       FURYCTL_OUTDIR: ./
@@ -169,7 +169,7 @@
     environment:
       CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}
       KUBECONFIG: /drone/src/kubeconfig
-      FURYCTL_VERSION: v0.30.0
+      FURYCTL_VERSION: v0.30.1
     depends_on: [create Kind cluster]
     commands:
       - export KUBECONFIG=/drone/src/kubeconfig
@@ -272,7 +272,7 @@
     environment:
      CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades
      KUBECONFIG: /drone/src/kubeconfig-upgrades
-      FURYCTL_VERSION: v0.30.0
+      FURYCTL_VERSION: v0.30.1
     depends_on: [create Kind cluster]
     commands:
       - export KUBECONFIG=/drone/src/kubeconfig-upgrades
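The `012-no.yaml`/`012-ok.yaml` pair added in PATCH 1 pins down a conditional rule: when a node pool's `type` is `eks-managed`, the schema rejects a non-null `ami.id`. The sketch below reconstructs that rule from the error path quoted in `012-no.yaml` (`.../then/properties/ami/properties/id/type: expected null, but got string`). The inline JSON fragment is an illustration inferred from that message, not the literal text of `schemas/public/ekscluster-kfd-v1alpha2.json`, and the use of `github.com/santhosh-tekuri/jsonschema/v5` is an assumption.

```go
// A minimal, self-contained sketch of the eks-managed/ami.id conditional.
// The schema fragment is reconstructed from the error message in 012-no.yaml
// and is illustrative only.
package main

import (
	"fmt"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

const nodePoolRule = `{
  "if":   { "properties": { "type": { "const": "eks-managed" } } },
  "then": { "properties": { "ami": { "properties": { "id": { "type": "null" } } } } }
}`

func main() {
	sch, err := jsonschema.CompileString("nodepool.json", nodePoolRule)
	if err != nil {
		panic(err)
	}

	// Mirrors 012-no.yaml: an eks-managed pool that still sets ami.id.
	bad := map[string]interface{}{
		"type": "eks-managed",
		"ami":  map[string]interface{}{"id": "ami-01234567890123456"},
	}
	// Mirrors the intent stated in 012-ok.yaml's header: no ami.id set.
	good := map[string]interface{}{"type": "eks-managed"}

	fmt.Println("012-no shape rejected:", sch.Validate(bad) != nil)  // true
	fmt.Println("012-ok shape accepted:", sch.Validate(good) == nil) // true
}
```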
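More generally, the Given/When/Then headers in these fixtures establish a convention: every `NNN-ok.yaml` under `tests/schemas/public/` is expected to validate against the corresponding public schema, and every `NNN-no.yaml` is expected to fail. A table-driven Go test along the following lines could enforce that convention; the library choices (`sigs.k8s.io/yaml` for YAML-to-JSON conversion, `santhosh-tekuri/jsonschema/v5` for validation) and the relative paths are assumptions, since the repo's actual test harness is not part of this patch.

```go
// A sketch of a fixture-driven schema test; package name and paths are
// illustrative, not taken from the repository.
package schemas_test

import (
	"encoding/json"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/santhosh-tekuri/jsonschema/v5"
	"sigs.k8s.io/yaml"
)

func TestEKSClusterPublicSchemaFixtures(t *testing.T) {
	sch, err := jsonschema.Compile("schemas/public/ekscluster-kfd-v1alpha2.json")
	if err != nil {
		t.Fatalf("compiling schema: %v", err)
	}

	fixtures, err := filepath.Glob("tests/schemas/public/ekscluster-kfd-v1alpha2/*.yaml")
	if err != nil {
		t.Fatal(err)
	}

	for _, path := range fixtures {
		path := path
		t.Run(filepath.Base(path), func(t *testing.T) {
			raw, err := os.ReadFile(path)
			if err != nil {
				t.Fatal(err)
			}
			// Convert the YAML fixture to JSON so the validator can consume it.
			jsonRaw, err := yaml.YAMLToJSON(raw)
			if err != nil {
				t.Fatal(err)
			}
			var doc interface{}
			if err := json.Unmarshal(jsonRaw, &doc); err != nil {
				t.Fatal(err)
			}

			// Files ending in "-ok.yaml" must validate; "-no.yaml" must not.
			err = sch.Validate(doc)
			wantValid := strings.HasSuffix(path, "-ok.yaml")
			if wantValid && err != nil {
				t.Errorf("expected valid config, got: %v", err)
			}
			if !wantValid && err == nil {
				t.Error("expected validation error, got none")
			}
		})
	}
}
```

Running a check like this after touching any fixture would catch mismatches such as a `-no` file that accidentally validates, or an `-ok` file whose contents no longer satisfy the schema it documents.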