diff --git a/.changelog/2592.txt b/.changelog/2592.txt new file mode 100644 index 0000000000..96bf3f30d7 --- /dev/null +++ b/.changelog/2592.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +`kubernetes_manifest` - handling "404 Not Found" errors during the deletion of Kubernetes resources, particularly in cases where the resource may have already been deleted by an operator managing the CRD before Terraform attempts to delete it. +``` \ No newline at end of file diff --git a/.changelog/2595.txt b/.changelog/2595.txt new file mode 100644 index 0000000000..2f31c97a50 --- /dev/null +++ b/.changelog/2595.txt @@ -0,0 +1,3 @@ +```release-note:improvement +resource/kubernetes_deployment_v1: Fix validation of `restart_policy` values +``` \ No newline at end of file diff --git a/.changelog/2596.txt b/.changelog/2596.txt new file mode 100644 index 0000000000..50e5816094 --- /dev/null +++ b/.changelog/2596.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +Properly handle Kubernetes Jobs with ttl_seconds_after_finished = 0 to prevent unnecessary recreation. +``` \ No newline at end of file diff --git a/.changelog/2604.txt b/.changelog/2604.txt new file mode 100644 index 0000000000..bfb657a013 --- /dev/null +++ b/.changelog/2604.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +Adding the `kubernetes_secret_v1_data` resource to the kubernetes provider. 
This resource will allow users to manage kubernetes secrets +``` \ No newline at end of file diff --git a/.changelog/2612.txt b/.changelog/2612.txt new file mode 100644 index 0000000000..41a44644cd --- /dev/null +++ b/.changelog/2612.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +Added `conditions` attribute to `kubernetes_nodes` data source, which will provide detailed node health and status information +``` \ No newline at end of file diff --git a/.github/workflows/acceptance_test_dfa.yaml b/.github/workflows/acceptance_test_dfa.yaml index aa0b99e5d4..724a9ab05c 100644 --- a/.github/workflows/acceptance_test_dfa.yaml +++ b/.github/workflows/acceptance_test_dfa.yaml @@ -4,6 +4,7 @@ on: pull_request: branches: - main + - v3-major-release paths: - "manifest/**/*.go" - 'kubernetes/**/*.go' diff --git a/.github/workflows/acceptance_tests_kind.yaml b/.github/workflows/acceptance_tests_kind.yaml index f2e48e03e2..a7334629ad 100644 --- a/.github/workflows/acceptance_tests_kind.yaml +++ b/.github/workflows/acceptance_tests_kind.yaml @@ -18,6 +18,7 @@ on: pull_request: branches: - main + - v3-major-release paths: - 'kubernetes/*.go' - 'go.mod' @@ -41,12 +42,19 @@ jobs: matrix: kubernetes_version: # kind images: https://github.com/kubernetes-sigs/kind/releases (note the images are kind release specific) + - v1.31.2@sha256:33034c0a75dd82b2f2f22bdf0a30ea2a42b2c3547a6d56c52c7ea9c1b5fb89b9 + - v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e - v1.29.0@sha256:eaa1450915475849a73a9227b8f201df25e55e268e5d619312131292e324d570 - v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 - v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72 - v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb - v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8 - v1.23.17@sha256:59c989ff8a517a93127d4a536e7014d28e235fb3529d9fba91b3951d461edfdb + isMain: + - ${{ 
contains(github.ref, 'main') }} + exclude: + - isMain: true + kubernetes_version: v1.31.2@sha256:33034c0a75dd82b2f2f22bdf0a30ea2a42b2c3547a6d56c52c7ea9c1b5fb89b9 steps: - name: Checkout repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 diff --git a/.github/workflows/check_examples.yaml b/.github/workflows/check_examples.yaml index 1135d2fa39..b92a967f0d 100644 --- a/.github/workflows/check_examples.yaml +++ b/.github/workflows/check_examples.yaml @@ -4,12 +4,14 @@ on: push: branches: - main + - v3-major-release paths: - "_examples/kubernetes_manifest/**" - "**.go" pull_request: branches: - main + - v3-major-release paths: - "_examples/kubernetes_manifest/**" - "**.go" diff --git a/.github/workflows/documentation-check.yaml b/.github/workflows/documentation-check.yaml index bdc29395f1..7e19deb1e7 100644 --- a/.github/workflows/documentation-check.yaml +++ b/.github/workflows/documentation-check.yaml @@ -1,42 +1,42 @@ -name: "Documentation Updates" +# name: "Documentation Updates" -on: - pull_request: - paths: - - 'docs/**' - types: [opened, synchronize, labeled] +# on: +# pull_request: +# paths: +# - 'docs/**' +# types: [opened, synchronize, labeled] - push: - branches: - - main +# push: +# branches: +# - main -jobs: - check-docs: - runs-on: ubuntu-latest +# jobs: +# check-docs: +# runs-on: ubuntu-latest - if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-documentation') }} +# if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-documentation') }} - steps: - - name: Checkout repository - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 +# steps: +# - name: Checkout repository +# uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - name: Set up Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version-file: 'go.mod' +# - name: Set up Go +# uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 +# with: +# 
go-version-file: 'go.mod' - - name: Install tfplugindocs command - run: go install github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs@latest +# - name: Install tfplugindocs command +# run: go install github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs@latest - - name: Run tfplugindocs command - run: tfplugindocs generate +# - name: Run tfplugindocs command +# run: tfplugindocs generate - - name: Check for changes - run: | - git diff --exit-code +# - name: Check for changes +# run: | +# git diff --exit-code - - name: Undocumented changes - run: | - echo "Documentation is not up to date. Please refer to the `Making Changes` in the Contribution Guide on how to properly update documentation." - exit 1 - if: failure() \ No newline at end of file +# - name: Undocumented changes +# run: | +# echo 'Documentation is not up to date. Please refer to the `Making Changes` in the Contribution Guide on how to properly update documentation.' +# exit 1 +# if: failure() \ No newline at end of file diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index f715117439..3f8951e05e 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -5,6 +5,7 @@ on: pull_request: branches: - main + - v3-major-release paths: - '**/*.go' - '**/go.mod' diff --git a/.github/workflows/manifest_acc.yaml b/.github/workflows/manifest_acc.yaml index 2d4152fa30..678df55f24 100644 --- a/.github/workflows/manifest_acc.yaml +++ b/.github/workflows/manifest_acc.yaml @@ -4,12 +4,14 @@ on: push: branches: - main + - v3-major-release paths: - "manifest/**/*.go" - "manifest/**/go.mod" pull_request: branches: - main + - v3-major-release paths: - "manifest/**/*.go" - "manifest/**/go.mod" @@ -27,14 +29,23 @@ jobs: fail-fast: false matrix: kubernetes_version: - # kind images: https://github.com/kubernetes-sigs/kind/releases + # kind images: https://github.com/kubernetes-sigs/kind/releases (note the images are kind release 
specific) + - v1.31.2@sha256:33034c0a75dd82b2f2f22bdf0a30ea2a42b2c3547a6d56c52c7ea9c1b5fb89b9 - v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e + - v1.29.0@sha256:eaa1450915475849a73a9227b8f201df25e55e268e5d619312131292e324d570 - v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 - v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72 - v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb - v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8 - v1.23.17@sha256:59c989ff8a517a93127d4a536e7014d28e235fb3529d9fba91b3951d461edfdb + isMain: + - ${{ contains(github.ref, 'main') }} + exclude: + - isMain: true + kubernetes_version: v1.31.2@sha256:33034c0a75dd82b2f2f22bdf0a30ea2a42b2c3547a6d56c52c7ea9c1b5fb89b9 + terraform_version: + - 1.9.8 - 1.8.5 - 1.6.6 - 1.5.7 @@ -43,6 +54,7 @@ jobs: - 1.2.9 - 1.1.9 - 1.0.11 + steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Set up Go @@ -52,7 +64,7 @@ jobs: - name: Setup kind uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0 with: - version: v0.20.0 + version: v0.21.0 node_image: kindest/node:${{ matrix.kubernetes_version }} # By default, this action creates a cluster with the name 'chart-testing' cluster_name: manifest diff --git a/.github/workflows/manifest_unit.yaml b/.github/workflows/manifest_unit.yaml index a90a6390b7..9ab1a24c9b 100644 --- a/.github/workflows/manifest_unit.yaml +++ b/.github/workflows/manifest_unit.yaml @@ -4,12 +4,14 @@ on: push: branches: - main + - v3-major-release paths: - "manifest/**/*.go" - "manifest/**/go.mod" pull_request: branches: - main + - v3-major-release paths: - "manifest/**/*.go" - "manifest/**/go.mod" diff --git a/.github/workflows/provider_functions_unit.yaml b/.github/workflows/provider_functions_unit.yaml index d2b2428685..4ebcb9d842 100644 --- 
a/.github/workflows/provider_functions_unit.yaml +++ b/.github/workflows/provider_functions_unit.yaml @@ -4,11 +4,13 @@ on: push: branches: - main + - v3-major-release paths: - "internal/framework/provider/functions/**/*.go" pull_request: branches: - main + - v3-major-release paths: - "internal/framework/provider/functions/**/*.go" workflow_dispatch: diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 91c6bb973b..d94b010729 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -4,9 +4,11 @@ on: push: branches: - main + - v3-major-release pull_request: branches: - main + - v3-major-release paths: - 'kubernetes/*.go' workflow_dispatch: diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b163ec2d9..57e4b56116 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +## 2.33.0 (Oct 10, 2024) + +ENHANCEMENTS: + +* Add `backoff_per_limit_index` and `max_failed_indexes` fields in `structure_job.go` [[GH-2421](https://github.com/hashicorp/terraform-provider-kubernetes/issues/2421)] +* Added support for `namespace_selector` field in `PodAffinityTerm` to enhance pod affinity and anti-affinity rules, allowing selection of namespaces based on label selectors. [[GH-2577](https://github.com/hashicorp/terraform-provider-kubernetes/issues/2577)] +* `kubernetes_manifest` - handling "404 Not Found" errors during the deletion of Kubernetes resources, particularly in cases where the resource may have already been deleted by an operator managing the CRD before Terraform attempts to delete it. 
[[GH-2592](https://github.com/hashicorp/terraform-provider-kubernetes/issues/2592)] +* `schema_container.go`: Add VolumeDevices [[GH-2573](https://github.com/hashicorp/terraform-provider-kubernetes/issues/2573)] + ## 2.32.0 (Aug 14, 2024) FEATURES: diff --git a/_about/CONTRIBUTING.md b/_about/CONTRIBUTING.md index 75cff8d701..27a503fcef 100644 --- a/_about/CONTRIBUTING.md +++ b/_about/CONTRIBUTING.md @@ -10,7 +10,7 @@ If you want to learn more about developing a Terraform provider, please refer to [Install](https://go.dev/doc/install) the version of Golang as indicated in the [go.mod](../go.mod) file. -1. Fork this repo +2. Fork this repo [Fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo) the provider repository and clone it on your computer. @@ -23,7 +23,7 @@ If you want to learn more about developing a Terraform provider, please refer to From now on, we are going to assume that you have a copy of the repository on your computer and work within the `terraform-provider-kubernetes` directory. -1. Prepare a Kubernetes Cluster +3. Prepare a Kubernetes Cluster While our preference is to use [KinD](https://kind.sigs.k8s.io/) for setting up a Kubernetes cluster for development and test purposes, feel free to opt for the solution that best suits your preferences. Please bear in mind that some acceptance tests might require specific cluster settings, which we maintain in the KinD [configuration file](../.github/config/acceptance_tests_kind_config.yaml). @@ -55,30 +55,32 @@ If you want to learn more about developing a Terraform provider, please refer to This quick guide covers best practices for adding a new Resource. 1. Ensure all dependncies are installed. -1. Add an SDK Client. -1. Add Resource Schema and define attributes [see Kubernetes Documentation](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs). 
A best and recommended practice is reuse constants from the Kuberentes packages as a default value in an attribute or within a validation function. -1. Scaffold an empty/new resource. -1. Add Acceptance Tests(s) for the resource. -1. Run Acceptance Tests(s) for this resource. -1. Add Documentation for this resource by editing the `.md.tmpl` file to include the appropriate [Data Fields](https://pkg.go.dev/text/template) and executing `tfplugindocs generate` command [see Terraform PluginDocs](https://github.com/hashicorp/terraform-plugin-docs#data-fields) then inspecting the corresponding `.md` file in the `/docs` to see all changes. The Data Fields that are currently apart of the templates are those for the Schema ({{ .SchemaMarkdown }}), Name ({{ .Name }}) and ({{ .Description }}). -1. Execute `make docs-lint` and `make tests-lint` commands -1. Create a Pull Request for your changes. +2. Add an SDK Client. +3. Add Resource Schema and define attributes [see Kubernetes Documentation](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs). A best and recommended practice is reuse constants from the Kuberentes packages as a default value in an attribute or within a validation function. +4. Scaffold an empty/new resource. +5. Add Acceptance Tests(s) for the resource. +6. Run Acceptance Tests(s) for this resource. +7. Add documentation for this resource in the appropriate `docs/resources/_.go.md` file. + +8. Execute `make docs-lint` and `make tests-lint` commands +9. Create a Pull Request for your changes. ### Adding a New Data Source 1. Ensure all dependncies are installed. -1. Add an SDK Client. -1. Add Data Source Schema and define attributes [see Kubernetes Documentation](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs). +2. Add an SDK Client. +3. Add Data Source Schema and define attributes [see Kubernetes Documentation](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs). 
A best and recommended practice is reuse constants from the Kuberentes packages as a default value in an attribute or within a validation function. -1. Scaffold an empty/new resource. -1. Add Acceptance Tests(s) for the data source. -1. Run Acceptance Tests(s) for this data source. -1. Add Documentation for this data source by editing the `.md.tmpl` file to include the appropriate [Data Fields](https://pkg.go.dev/text/template) and executing `tfplugindocs generate` command [see Terraform PluginDocs](https://github.com/hashicorp/terraform-plugin-docs#data-fields) then inspecting the corresponding `.md` file in the `/docs` to see all changes. The Data Fields that are currently apart of the templates are those for the Schema ({{ .SchemaMarkdown }}), Name ({{ .Name }}) and ({{ .Description }}). -1. Execute `make docs-lint` and `make tests-lint` commands -1. Create a Pull Request for your changes. - -### Adding/Editing Documentation -All Documentation is edited in the `.md.tmpl` file. Please note that the `tfplugindocs generate` command should be executed to ensure it is updated and reflected in the `.md` files. +4. Scaffold an empty/new resource. +5. Add Acceptance Tests(s) for the data source. +6. Run Acceptance Tests(s) for this data source. +7. Add documentation for this data source in the appropriate `docs/data-sources/_.md` file. + +8. Execute `make docs-lint` and `make tests-lint` commands +9. Create a Pull Request for your changes. 
+ + ## Testing diff --git a/docs/data-sources/nodes.md b/docs/data-sources/nodes.md index 54d5ec792a..3e5b9e41f6 100644 --- a/docs/data-sources/nodes.md +++ b/docs/data-sources/nodes.md @@ -82,7 +82,7 @@ Read-Only: - `allocatable` (Map of String) - `capacity` (Map of String) - `node_info` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--status--node_info)) - +- `conditions` (List of Object) (see [below for nested schema](#nestedobjatt--nodes--status--conditions)) ### Nested Schema for `nodes.status.addresses` @@ -108,7 +108,17 @@ Read-Only: - `os_image` (String) - `system_uuid` (String) + +### Nested Schema for `nodes.status.conditions` + +Read-Only: +- `type` (String) +- `status` (String) +- `last_heartbeat_time` (String) +- `last_transition_time` (String) +- `reason` (String) +- `message` (String) diff --git a/docs/resources/deployment.md b/docs/resources/deployment.md index 1282ac3e7f..1fa160f220 100644 --- a/docs/resources/deployment.md +++ b/docs/resources/deployment.md @@ -67,7 +67,7 @@ Optional: Required: - `metadata` (Block List, Min: 1, Max: 1) Standard pod's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata (see [below for nested schema](#nestedblock--spec--template--metadata)) -- `spec` (Block List, Min: 1, Max: 1) Spec defines the specification of the desired behavior of the deployment. More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.9/#deployment-v1-apps (see [below for nested schema](#nestedblock--spec--template--spec)) +- `spec` (Block List, Min: 1, Max: 1) Spec defines the specification of the desired behavior of the deployment. 
More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/ (see [below for nested schema](#nestedblock--spec--template--spec)) ### Nested Schema for `spec.template.metadata` diff --git a/docs/resources/deployment_v1.md b/docs/resources/deployment_v1.md index 99854140b6..049de98022 100644 --- a/docs/resources/deployment_v1.md +++ b/docs/resources/deployment_v1.md @@ -67,7 +67,7 @@ Optional: Required: - `metadata` (Block List, Min: 1, Max: 1) Standard pod's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata (see [below for nested schema](#nestedblock--spec--template--metadata)) -- `spec` (Block List, Min: 1, Max: 1) Spec defines the specification of the desired behavior of the deployment. More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.9/#deployment-v1-apps (see [below for nested schema](#nestedblock--spec--template--spec)) +- `spec` (Block List, Min: 1, Max: 1) Spec defines the specification of the desired behavior of the deployment. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/ (see [below for nested schema](#nestedblock--spec--template--spec)) ### Nested Schema for `spec.template.metadata` @@ -111,7 +111,7 @@ Optional: - `os` (Block List, Max: 1) Specifies the OS of the containers in the pod. (see [below for nested schema](#nestedblock--spec--template--spec--os)) - `priority_class_name` (String) If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. - `readiness_gate` (Block List) If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md (see [below for nested schema](#nestedblock--spec--template--spec--readiness_gate)) -- `restart_policy` (String) Restart policy for all containers within the pod. One of Always, OnFailure, Never. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy. +- `restart_policy` (String) Restart policy for all containers within the pod. Defaults to Always as the only option. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy. - `runtime_class_name` (String) RuntimeClassName is a feature for selecting the container runtime configuration. The container runtime configuration is used to run a Pod's containers. More info: https://kubernetes.io/docs/concepts/containers/runtime-class - `scheduler_name` (String) If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. - `security_context` (Block List, Max: 1) SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty (see [below for nested schema](#nestedblock--spec--template--spec--security_context)) diff --git a/docs/resources/secret_v1_data.md b/docs/resources/secret_v1_data.md new file mode 100644 index 0000000000..4f8d92f68f --- /dev/null +++ b/docs/resources/secret_v1_data.md @@ -0,0 +1,58 @@ +--- +subcategory: "core/v1" +page_title: "Kubernetes: kubernetes_secret_v1_data" +description: |- + This resource allows Terraform to manage the data for a Secret that already exists. +--- + +# kubernetes_secret_v1_data + +This resource allows Terraform to manage data within a pre-existing Secret. 
This resource uses [field management](https://kubernetes.io/docs/reference/using-api/server-side-apply/#field-management) and [server-side apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) to manage only the data that is defined in the Terraform configuration. Existing data not specified in the configuration will be ignored. If data specified in the config is already managed by another client, it will cause a conflict which can be overridden by setting `force` to true. + + +## Schema + +### Required + +- `data` (Map of String) The data we want to add to the Secret. +- `metadata` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--metadata)) + +### Optional + +- `field_manager` (String) Set the name of the field manager for the specified labels. +- `force` (Boolean) Force overwriting data that is managed outside of Terraform. + +### Read-Only + +- `id` (String) The ID of this resource. + + +### Nested Schema for `metadata` + +Required: + +- `name` (String) The name of the Secret. + +Optional: + +- `namespace` (String) The namespace of the Secret. + +## Example Usage + +```terraform +resource "kubernetes_secret_v1_data" "example" { + metadata { + name = "my-secret" + } + data = { + "username" = "admin" + "password" = "s3cr3t" + } +} +``` + +## Import + +This resource does not support the `import` command. As this resource operates on Kubernetes resources that already exist, creating the resource is equivalent to importing it. 
+ + diff --git a/kubernetes/data_source_kubernetes_nodes_test.go b/kubernetes/data_source_kubernetes_nodes_test.go index d733fa1105..68cd84723c 100644 --- a/kubernetes/data_source_kubernetes_nodes_test.go +++ b/kubernetes/data_source_kubernetes_nodes_test.go @@ -33,6 +33,13 @@ func TestAccKubernetesDataSourceNodes_basic(t *testing.T) { resource.TestCheckResourceAttrWith(dataSourceName, "nodes.0.status.0.capacity.memory", checkParsableQuantity), resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.node_info.0.architecture"), resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.addresses.0.address"), + resource.TestMatchResourceAttr(dataSourceName, "nodes.0.status.0.conditions.#", oneOrMore), + resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.conditions.0.type"), + resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.conditions.0.status"), + resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.conditions.0.last_heartbeat_time"), + resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.conditions.0.last_transition_time"), + resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.conditions.0.reason"), + resource.TestCheckResourceAttrSet(dataSourceName, "nodes.0.status.0.conditions.0.message"), ) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/kubernetes/provider.go b/kubernetes/provider.go index 6ae08a37c1..f0018379df 100644 --- a/kubernetes/provider.go +++ b/kubernetes/provider.go @@ -264,6 +264,7 @@ func Provider() *schema.Provider { "kubernetes_config_map_v1_data": resourceKubernetesConfigMapV1Data(), "kubernetes_secret": resourceKubernetesSecretV1(), "kubernetes_secret_v1": resourceKubernetesSecretV1(), + "kubernetes_secret_v1_data": resourceKubernetesSecretV1Data(), "kubernetes_pod": resourceKubernetesPodV1(), "kubernetes_pod_v1": resourceKubernetesPodV1(), "kubernetes_endpoints": 
resourceKubernetesEndpointsV1(), diff --git a/kubernetes/resource_kubernetes_deployment_v1.go b/kubernetes/resource_kubernetes_deployment_v1.go index a28338ff22..22749666be 100644 --- a/kubernetes/resource_kubernetes_deployment_v1.go +++ b/kubernetes/resource_kubernetes_deployment_v1.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -199,7 +200,7 @@ func resourceKubernetesDeploymentSchemaV1() map[string]*schema.Schema { Required: true, MaxItems: 1, Elem: &schema.Resource{ - Schema: podSpecFields(true, false), + Schema: deploymentPodTemplateSpecFields(), }, }, }, @@ -217,6 +218,16 @@ func resourceKubernetesDeploymentSchemaV1() map[string]*schema.Schema { } } +func deploymentPodTemplateSpecFields() map[string]*schema.Schema { + psf := podSpecFields(true, false) + rp := psf["restart_policy"] + rp.ValidateFunc = validation.StringInSlice([]string{ + string(corev1.RestartPolicyAlways), + }, false) + rp.Description = "Restart policy for all containers within the pod. Defaults to Always as the only option. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy." 
+ return psf +} + func resourceKubernetesDeploymentV1Create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn, err := meta.(KubeClientsets).MainClientset() if err != nil { diff --git a/kubernetes/resource_kubernetes_deployment_v1_test.go b/kubernetes/resource_kubernetes_deployment_v1_test.go index d93c716420..85872dec7e 100644 --- a/kubernetes/resource_kubernetes_deployment_v1_test.go +++ b/kubernetes/resource_kubernetes_deployment_v1_test.go @@ -1225,6 +1225,36 @@ func TestAccKubernetesDeploymentV1_config_with_automount_service_account_token(t }) } +func TestAccKubernetesDeploymentV1_with_restart_policy(t *testing.T) { + var conf appsv1.Deployment + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + resourceName := "kubernetes_deployment_v1.test" + imageName := busyboxImage + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + IDRefreshIgnore: []string{"metadata.0.resource_version"}, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckKubernetesDeploymentV1Destroy, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesDeploymentV1Config_with_restart_policy(name, imageName, "Never"), + ExpectError: regexp.MustCompile("expected spec\\.0\\.template\\.0\\.spec\\.0\\.restart_policy to be one of \\[\"Always\"\\], got Never"), + }, + { + Config: testAccKubernetesDeploymentV1Config_with_restart_policy(name, imageName, "Always"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesDeploymentV1Exists(resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.generation"), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.resource_version"), + resource.TestCheckResourceAttrSet(resourceName, "metadata.0.uid"), + ), + }, + }, + }) +} + func testAccCheckKubernetesDeploymentForceNew(old, new *appsv1.Deployment, wantNew bool) 
resource.TestCheckFunc { return func(s *terraform.State) error { if wantNew { @@ -1409,6 +1439,39 @@ func testAccKubernetesDeploymentV1Config_basic(name, imageName string) string { `, name, imageName) } +func testAccKubernetesDeploymentV1Config_with_restart_policy(name, imageName, restartPolicy string) string { + return fmt.Sprintf(`resource "kubernetes_deployment_v1" "test" { + metadata { + name = "%s" + } + spec { + replicas = 2 + selector { + match_labels = { + TestLabelOne = "one" + } + } + template { + metadata { + labels = { + TestLabelOne = "one" + } + } + spec { + container { + image = "%s" + name = "tf-acc-test" + command = ["sleep", "300"] + } + restart_policy = "%s" + termination_grace_period_seconds = 1 + } + } + } +} +`, name, imageName, restartPolicy) +} + func testAccKubernetesDeploymentV1Config_initContainer(namespace, name, imageName, imageName1, memory, envName, initName, initCommand, pullPolicy string) string { return fmt.Sprintf(`resource "kubernetes_namespace_v1" "test" { metadata { diff --git a/kubernetes/resource_kubernetes_job_v1.go b/kubernetes/resource_kubernetes_job_v1.go index 572635b340..cd2615518f 100644 --- a/kubernetes/resource_kubernetes_job_v1.go +++ b/kubernetes/resource_kubernetes_job_v1.go @@ -118,8 +118,17 @@ func resourceKubernetesJobV1Read(ctx context.Context, d *schema.ResourceData, me return diag.FromErr(err) } if !exists { - d.SetId("") - return diag.Diagnostics{} + // Check if ttl_seconds_after_finished is set + if ttl, ok := d.GetOk("spec.0.ttl_seconds_after_finished"); ok { + // ttl_seconds_after_finished is set, Job is deleted due to TTL + // We don't need to remove the resource from the state + log.Printf("[INFO] Job %s has been deleted by Kubernetes due to TTL (ttl_seconds_after_finished = %v), keeping resource in state", d.Id(), ttl) + return diag.Diagnostics{} + } else { + // ttl_seconds_after_finished is not set, remove the resource from the state + d.SetId("") + return diag.Diagnostics{} + } } conn, err := 
meta.(KubeClientsets).MainClientset() if err != nil { @@ -204,7 +213,6 @@ func resourceKubernetesJobV1Update(ctx context.Context, d *schema.ResourceData, } return resourceKubernetesJobV1Read(ctx, d, meta) } - func resourceKubernetesJobV1Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn, err := meta.(KubeClientsets).MainClientset() if err != nil { diff --git a/kubernetes/resource_kubernetes_job_v1_test.go b/kubernetes/resource_kubernetes_job_v1_test.go index 396777de9b..2e85679c23 100644 --- a/kubernetes/resource_kubernetes_job_v1_test.go +++ b/kubernetes/resource_kubernetes_job_v1_test.go @@ -237,6 +237,82 @@ func TestAccKubernetesJobV1_ttl_seconds_after_finished(t *testing.T) { }) } +func TestAccKubernetesJobV1_customizeDiff_ttlZero(t *testing.T) { + var conf batchv1.Job + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + imageName := busyboxImage + resourceName := "kubernetes_job_v1.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + skipIfClusterVersionLessThan(t, "1.21.0") + }, + ProviderFactories: testAccProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Create the Job + { + Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesJobV1Exists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "spec.0.ttl_seconds_after_finished", "0"), + ), + }, + // Step 2: Wait for the Job to complete and be deleted + { + PreConfig: func() { + time.Sleep(30 * time.Second) + }, + Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }, + }) +} + +func TestAccKubernetesJobV1_updateTTLFromZero(t *testing.T) { + var conf batchv1.Job + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) + imageName := busyboxImage + resourceName := "kubernetes_job_v1.test" + + resource.ParallelTest(t, 
resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + skipIfClusterVersionLessThan(t, "1.21.0") + }, + ProviderFactories: testAccProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Create the Job with ttl_seconds_after_finished = 0 + { + Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesJobV1Exists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "spec.0.ttl_seconds_after_finished", "0"), + ), + }, + // Step 2: Wait for the Job to complete and be deleted + { + PreConfig: func() { + time.Sleep(30 * time.Second) + }, + Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + // Step 3: Update the Job to ttl_seconds_after_finished = 5 + { + Config: testAccKubernetesJobV1Config_Diff(name, imageName, 5), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKubernetesJobV1Exists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "spec.0.ttl_seconds_after_finished", "5"), + ), + }, + }, + }) +} + func testAccCheckJobV1Waited(minDuration time.Duration) func(*terraform.State) error { // NOTE this works because this function is called when setting up the test // and the function it returns is called after the resource has been created @@ -516,3 +592,28 @@ func testAccKubernetesJobV1Config_modified(name, imageName string) string { wait_for_completion = false }`, name, imageName) } + +func testAccKubernetesJobV1Config_Diff(name, imageName string, ttl int) string { + return fmt.Sprintf(` +resource "kubernetes_job_v1" "test" { + metadata { + name = "%s" + } + spec { + ttl_seconds_after_finished = %d + template { + metadata {} + spec { + container { + name = "wait-test" + image = "%s" + command = ["sleep", "20"] + } + restart_policy = "Never" + } + } + } + wait_for_completion = false +} +`, name, ttl, imageName) +} diff --git 
a/kubernetes/resource_kubernetes_secret_v1_data.go b/kubernetes/resource_kubernetes_secret_v1_data.go new file mode 100644 index 0000000000..75a59eb408 --- /dev/null +++ b/kubernetes/resource_kubernetes_secret_v1_data.go @@ -0,0 +1,245 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" +) + +func resourceKubernetesSecretV1Data() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceKubernetesSecretV1DataCreate, + ReadContext: resourceKubernetesSecretV1DataRead, + UpdateContext: resourceKubernetesSecretV1DataUpdate, + DeleteContext: resourceKubernetesSecretV1DataDelete, + + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeList, + Description: "Metadata for the kubernetes Secret.", + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Description: "The name of the Secret.", + Required: true, + ForceNew: true, + }, + "namespace": { + Type: schema.TypeString, + Description: "The namespace of the Secret.", + Optional: true, + ForceNew: true, + Default: "default", + }, + }, + }, + }, + "data": { + Type: schema.TypeMap, + Description: "Data to be stored in the Kubernetes Secret.", + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "force": { + Type: schema.TypeBool, + Description: "Flag to force updates to the Kubernetes Secret.", + Optional: true, + }, + "field_manager": { + Type: schema.TypeString, + Description: "Set the name of the field manager for the specified labels", + Optional: true, + Default: 
defaultFieldManagerName, + }, + }, + } +} + +func resourceKubernetesSecretV1DataCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + metadata := expandMetadata(d.Get("metadata").([]any)) + // Sets the resource id based on the metadata + d.SetId(buildId(metadata)) + + // Calling the update function to ensure the resource config is correct + diag := resourceKubernetesSecretV1DataUpdate(ctx, d, m) + if diag.HasError() { + d.SetId("") + } + return diag +} + +// Retrieves the current state of the k8s secret, and updates the current state +func resourceKubernetesSecretV1DataRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + conn, err := m.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + namespace, name, err := idParts(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + // getting the secret data + res, err := conn.CoreV1().Secrets(namespace).Get(ctx, name, v1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return diag.Diagnostics{{ + Severity: diag.Warning, + Summary: "Secret deleted", + Detail: fmt.Sprintf("The underlying secret %q has been deleted. 
You should recreate the underlying secret, or remove it from your configuration.", name), + }} + } + return diag.FromErr(err) + } + + configuredData := d.Get("data").(map[string]any) + + // stripping out the data not managed by Terraform + fieldManagerName := d.Get("field_manager").(string) + + managedSecretData, err := getManagedSecretData(res.GetManagedFields(), fieldManagerName) + if err != nil { + return diag.FromErr(err) + } + data := res.Data + for k := range data { + _, managed := managedSecretData["f:"+k] + _, configured := configuredData[k] + if !managed && !configured { + delete(data, k) + } + + } + decodedData := make(map[string]string, len(data)) + for k, v := range data { + decodedData[k] = string(v) + } + + d.Set("data", decodedData) + + return nil +} + +// getManagedSecretData reads the field manager metadata to discover which fields we're managing +func getManagedSecretData(managedFields []v1.ManagedFieldsEntry, manager string) (map[string]interface{}, error) { + var data map[string]any + for _, m := range managedFields { + // Only consider entries managed by the specified manager + if m.Manager != manager { + continue + } + var mm map[string]any + err := json.Unmarshal(m.FieldsV1.Raw, &mm) + if err != nil { + return nil, err + } + // Check if the "data" field exists and extract it + if l, ok := mm["f:data"].(map[string]any); ok { + data = l + } + } + return data, nil +} + +func resourceKubernetesSecretV1DataUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + conn, err := m.(KubeClientsets).MainClientset() + if err != nil { + return diag.FromErr(err) + } + + metadata := expandMetadata(d.Get("metadata").([]any)) + name := metadata.GetName() + namespace := metadata.GetNamespace() + + _, err = conn.CoreV1().Secrets(namespace).Get(ctx, name, v1.GetOptions{}) + if err != nil { + if d.Id() == "" { + // If we are deleting then there is nothing to do if the resource is gone + return nil + } + if statusErr, ok := 
err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) { + return diag.Errorf("The Secret %q does not exist", name) + } + return diag.Errorf("Have got the following error while validating the existence of the Secret %q: %v", name, err) + } + + // Craft the patch to update the data + data := d.Get("data").(map[string]any) + if d.Id() == "" { + // If we're deleting then we just patch with an empty data map + data = map[string]interface{}{} + } + + encodedData := make(map[string][]byte, len(data)) + for k, v := range data { + encodedData[k] = []byte(v.(string)) + } + + patchobj := map[string]any{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": map[string]any{ + "name": name, + "namespace": namespace, + }, + "data": encodedData, + } + patch := unstructured.Unstructured{} + patch.Object = patchobj + patchbytes, err := patch.MarshalJSON() + if err != nil { + return diag.FromErr(err) + } + + // Apply the patch + _, err = conn.CoreV1().Secrets(namespace).Patch(ctx, + name, + types.ApplyPatchType, + patchbytes, + v1.PatchOptions{ + FieldManager: d.Get("field_manager").(string), + Force: ptr.To(d.Get("force").(bool)), + }, + ) + if err != nil { + if errors.IsConflict(err) { + return diag.Diagnostics{{ + Severity: diag.Error, + Summary: "Field manager conflict", + Detail: fmt.Sprintf("Another client is managing a field Terraform tried to update. Set 'force' to true to override: %v", err), + }} + } + return diag.FromErr(err) + } + + if d.Id() == "" { + return nil + } + + return resourceKubernetesSecretV1DataRead(ctx, d, m) +} + +func resourceKubernetesSecretV1DataDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + // sets resource id to an empty. Simulating the deletion. 
+ d.SetId("") + // Now we are calling the update function to update the resource state + return resourceKubernetesSecretV1DataUpdate(ctx, d, m) +} diff --git a/kubernetes/resource_kubernetes_secret_v1_data_test.go b/kubernetes/resource_kubernetes_secret_v1_data_test.go new file mode 100644 index 0000000000..a5c50afc63 --- /dev/null +++ b/kubernetes/resource_kubernetes_secret_v1_data_test.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package kubernetes + +import ( + "context" + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Handling the case for an empty secret +func TestAccKubernetesSecretV1Data_empty(t *testing.T) { + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + namespace := "default" + resourceName := "kubernetes_secret_v1_data.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + createEmptySecret(name, namespace) + }, + IDRefreshName: resourceName, + IDRefreshIgnore: []string{"metadata.0.resource_version"}, + ProviderFactories: testAccProviderFactories, + CheckDestroy: func(s *terraform.State) error { + return destroySecret(name, namespace) + }, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesSecretV1Data_empty(name), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "metadata.0.name", name), + resource.TestCheckResourceAttr(resourceName, "data.%", "0"), + resource.TestCheckResourceAttr(resourceName, "field_manager", "tftest"), + ), + }, + }, + }) +} + +func createEmptySecret(name, namespace string) error { + conn, err := testAccProvider.Meta().(KubeClientsets).MainClientset() + if err != nil { + return err + 
} + ctx := context.Background() + + secret := v1.Secret{} + secret.SetName(name) + secret.SetNamespace(namespace) + secret.Data = map[string][]byte{} + _, err = conn.CoreV1().Secrets(namespace).Create(ctx, &secret, metav1.CreateOptions{ + FieldManager: "tftest", + }) + return err +} + +// Handling the case of secret creation with basic data +func TestAccKubernetesSecretV1Data_basic_data(t *testing.T) { + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + namespace := "default" + resourceName := "kubernetes_secret_v1_data.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + createSecretWithData(name, namespace) + }, + IDRefreshName: resourceName, + IDRefreshIgnore: []string{"metadata.0.resource_version"}, + ProviderFactories: testAccProviderFactories, + CheckDestroy: func(s *terraform.State) error { + return destroySecret(name, namespace) + }, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesSecretV1Data_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "metadata.0.name", name), + resource.TestCheckResourceAttr(resourceName, "data.%", "2"), + resource.TestCheckResourceAttr(resourceName, "data.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "data.key2", "value2"), + resource.TestCheckResourceAttr(resourceName, "field_manager", "tftest"), + ), + }, + }, + }) +} + +func createSecretWithData(name, namespace string) error { + conn, err := testAccProvider.Meta().(KubeClientsets).MainClientset() + if err != nil { + return err + } + ctx := context.Background() + + data := map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + } + + secret := v1.Secret{} + secret.SetName(name) + secret.SetNamespace(namespace) + secret.Data = data + _, err = conn.CoreV1().Secrets(namespace).Create(ctx, &secret, metav1.CreateOptions{ + FieldManager: "tftest", + }) + return err +} + +// 
Handling the case for a modified secret +func TestAccKubernetesSecretV1Data_modified(t *testing.T) { + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + namespace := "default" + resourceName := "kubernetes_secret_v1_data.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + createModifiedSecret(name, namespace) + }, + IDRefreshName: resourceName, + IDRefreshIgnore: []string{"metadata.0.resource_version"}, + ProviderFactories: testAccProviderFactories, + CheckDestroy: func(s *terraform.State) error { + return destroySecret(name, namespace) + }, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesSecretV1Data_modified(name), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "metadata.0.name", name), + resource.TestCheckResourceAttr(resourceName, "data.%", "2"), + resource.TestCheckResourceAttr(resourceName, "data.key1", "one"), + resource.TestCheckResourceAttr(resourceName, "data.key3", "three"), + resource.TestCheckResourceAttr(resourceName, "field_manager", "tftest"), + ), + }, + }, + }) +} + +func createModifiedSecret(name, namespace string) error { + conn, err := testAccProvider.Meta().(KubeClientsets).MainClientset() + if err != nil { + return err + } + ctx := context.Background() + + data := map[string][]byte{ + "key1": []byte("one"), + "key3": []byte("three"), + } + + secret := v1.Secret{} + secret.SetName(name) + secret.SetNamespace(namespace) + secret.Data = data + _, err = conn.CoreV1().Secrets(namespace).Create(ctx, &secret, metav1.CreateOptions{ + FieldManager: "tftest", + }) + return err +} + +// deletes a kubernetes secret +func destroySecret(name, namespace string) error { + conn, err := testAccProvider.Meta().(KubeClientsets).MainClientset() + if err != nil { + return err + } + ctx := context.Background() + err = conn.CoreV1().Secrets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) + return err +} + 
+// Handling the case where it attempts to read a secret that doesn't exist in the cluster +func TestAcctKubernetesSecretV1Data_validation(t *testing.T) { + name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + resourceName := "kubernetes_secret_v1_data.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + IDRefreshIgnore: []string{"metadata.0.resource_version"}, + ProviderFactories: testAccProviderFactories, + Steps: []resource.TestStep{ + { + // Testing a non-existing secret + Config: testAccKubernetesSecretV1Data_empty(name), + ExpectError: regexp.MustCompile("The Secret .* does not exist"), + }, + }, + }) +} + +// Generate config for creating a secret with empty data +func testAccKubernetesSecretV1Data_empty(name string) string { + return fmt.Sprintf(`resource "kubernetes_secret_v1_data" "test" { + metadata { + name = %q + } + data = {} + field_manager = "tftest" + +} +`, name) +} + +// Generate some basic config, with a secret with basic data +func testAccKubernetesSecretV1Data_basic(name string) string { + return fmt.Sprintf(` +resource "kubernetes_secret_v1_data" "test" { + metadata { + name = %q + } + data = { + "key1" = "value1" + "key2" = "value2" + } + field_manager = "tftest" +} +`, name) +} + +func testAccKubernetesSecretV1Data_modified(name string) string { + return fmt.Sprintf(` +resource "kubernetes_secret_v1_data" "test" { + metadata { + name = %q + } + data = { + "key1" = "one" + "key3" = "three" + } + field_manager = "tftest" +} +`, name) +} diff --git a/kubernetes/schema_job_spec.go b/kubernetes/schema_job_spec.go index 1e7157bab3..243d8aae13 100644 --- a/kubernetes/schema_job_spec.go +++ b/kubernetes/schema_job_spec.go @@ -235,7 +235,7 @@ func jobSpecFields(specUpdatable bool) map[string]*schema.Schema { "ttl_seconds_after_finished": { Type: schema.TypeString, Optional: true, - ForceNew: false, + ForceNew: true, ValidateFunc: 
func(value interface{}, key string) ([]string, []error) { v, err := strconv.Atoi(value.(string)) if err != nil { diff --git a/kubernetes/schema_node_spec.go b/kubernetes/schema_node_spec.go index d87186039c..cf445f124c 100644 --- a/kubernetes/schema_node_spec.go +++ b/kubernetes/schema_node_spec.go @@ -124,6 +124,39 @@ func nodeStatusFields() map[string]*schema.Schema { }, }, }, + "conditions": { + Type: schema.TypeList, + Computed: true, + Description: "List of conditions describing each node's health and operational status.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "last_heartbeat_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_transition_time": { + Type: schema.TypeString, + Computed: true, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, } } diff --git a/kubernetes/structure_persistent_volume_spec.go b/kubernetes/structure_persistent_volume_spec.go index 2915d8ed7f..7980ddc9d0 100644 --- a/kubernetes/structure_persistent_volume_spec.go +++ b/kubernetes/structure_persistent_volume_spec.go @@ -67,27 +67,6 @@ func flattenAzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSour return []interface{}{att} } -func flattenCephFSVolumeSource(in *v1.CephFSVolumeSource) []interface{} { - att := make(map[string]interface{}) - att["monitors"] = newStringSet(schema.HashString, in.Monitors) - if in.Path != "" { - att["path"] = in.Path - } - if in.User != "" { - att["user"] = in.User - } - if in.SecretFile != "" { - att["secret_file"] = in.SecretFile - } - if in.SecretRef != nil { - att["secret_ref"] = flattenLocalObjectReference(in.SecretRef) - } - if in.ReadOnly { - att["read_only"] = in.ReadOnly - } - return []interface{}{att} -} - func flattenCephFSPersistentVolumeSource(in 
*v1.CephFSPersistentVolumeSource) []interface{} { att := make(map[string]interface{}) att["monitors"] = newStringSet(schema.HashString, in.Monitors) @@ -625,32 +604,6 @@ func expandAzureFilePersistentVolumeSource(l []interface{}) *v1.AzureFilePersist return obj } -func expandCephFSVolumeSource(l []interface{}) *v1.CephFSVolumeSource { - if len(l) == 0 || l[0] == nil { - return &v1.CephFSVolumeSource{} - } - in := l[0].(map[string]interface{}) - obj := &v1.CephFSVolumeSource{ - Monitors: sliceOfString(in["monitors"].(*schema.Set).List()), - } - if v, ok := in["path"].(string); ok { - obj.Path = v - } - if v, ok := in["user"].(string); ok { - obj.User = v - } - if v, ok := in["secret_file"].(string); ok { - obj.SecretFile = v - } - if v, ok := in["secret_ref"].([]interface{}); ok && len(v) > 0 { - obj.SecretRef = expandLocalObjectReference(v) - } - if v, ok := in["read_only"].(bool); ok { - obj.ReadOnly = v - } - return obj -} - func expandCephFSPersistentVolumeSource(l []interface{}) *v1.CephFSPersistentVolumeSource { if len(l) == 0 || l[0] == nil { return &v1.CephFSPersistentVolumeSource{} @@ -1522,28 +1475,6 @@ func patchPersistentVolumeSource(pathPrefix, prefix string, d *schema.ResourceDa } } - if d.HasChange(prefix + "ceph_fs") { - oldIn, newIn := d.GetChange(prefix + "ceph_fs") - oldV, oldOk := oldIn.([]interface{}) - newV, newOk := newIn.([]interface{}) - - if newOk && len(newV) > 0 { - if oldOk && len(oldV) > 0 { - ops = append(ops, &ReplaceOperation{ - Path: pathPrefix + "/cephfs", - Value: expandCephFSVolumeSource(newV), - }) - } else { - ops = append(ops, &AddOperation{ - Path: pathPrefix + "/cephfs", - Value: expandCephFSVolumeSource(newV), - }) - } - } else if oldOk && len(oldV) > 0 { - ops = append(ops, &RemoveOperation{Path: pathPrefix + "/cephfs"}) - } - } - if d.HasChange(prefix + "fc") { oldIn, newIn := d.GetChange(prefix + "fc") oldV, oldOk := oldIn.([]interface{}) diff --git a/kubernetes/structures_node.go b/kubernetes/structures_node.go index 
24dd1f06fe..fade3ad564 100644 --- a/kubernetes/structures_node.go +++ b/kubernetes/structures_node.go @@ -71,12 +71,29 @@ func flattenNodeInfo(in v1.NodeSystemInfo) []interface{} { return []interface{}{att} } +func flattenNodeConditions(conditions []v1.NodeCondition) []interface{} { + out := make([]interface{}, len(conditions)) + for i, condition := range conditions { + m := make(map[string]interface{}) + m["type"] = condition.Type + m["status"] = condition.Status + m["last_heartbeat_time"] = condition.LastHeartbeatTime.String() + m["last_transition_time"] = condition.LastTransitionTime.String() + m["reason"] = condition.Reason + m["message"] = condition.Message + out[i] = m + } + return out +} + func flattenNodeStatus(in v1.NodeStatus) []interface{} { att := make(map[string]interface{}) att["addresses"] = flattenAddresses(in.Addresses...) att["allocatable"] = flattenResourceList(in.Allocatable) att["capacity"] = flattenResourceList(in.Capacity) att["node_info"] = flattenNodeInfo(in.NodeInfo) + att["conditions"] = flattenNodeConditions(in.Conditions) + return []interface{}{att} } diff --git a/kubernetes/structures_pod.go b/kubernetes/structures_pod.go index ea5ccfdb86..6ab2427de7 100644 --- a/kubernetes/structures_pod.go +++ b/kubernetes/structures_pod.go @@ -410,9 +410,6 @@ func flattenVolumes(volumes []v1.Volume) []interface{} { if v.Cinder != nil { obj["cinder"] = flattenCinderVolumeSource(v.Cinder) } - if v.CephFS != nil { - obj["ceph_fs"] = flattenCephFSVolumeSource(v.CephFS) - } if v.CSI != nil { obj["csi"] = flattenCSIVolumeSource(v.CSI) } @@ -1604,9 +1601,6 @@ func expandVolumes(volumes []interface{}) ([]v1.Volume, error) { if v, ok := m["cinder"].([]interface{}); ok && len(v) > 0 { vl[i].Cinder = expandCinderVolumeSource(v) } - if v, ok := m["ceph_fs"].([]interface{}); ok && len(v) > 0 { - vl[i].CephFS = expandCephFSVolumeSource(v) - } if v, ok := m["csi"].([]interface{}); ok && len(v) > 0 { vl[i].CSI = expandCSIVolumeSource(v) } diff --git 
a/manifest/openapi/testdata/k8s-swagger.json b/manifest/openapi/testdata/k8s-swagger.json index 7c64728a2e..ad9f32929f 100644 --- a/manifest/openapi/testdata/k8s-swagger.json +++ b/manifest/openapi/testdata/k8s-swagger.json @@ -80318,42 +80318,6 @@ } } }, - "io.k8s.api.core.v1.CephFSVolumeSource": { - "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "type": "object", - "required": [ - "monitors" - ], - "properties": { - "monitors": { - "description": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "array", - "items": { - "type": "string" - } - }, - "path": { - "description": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "type": "string" - }, - "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "boolean" - }, - "secretFile": { - "description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "string" - }, - "secretRef": { - "description": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" - }, - "user": { - "description": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "string" - } - } - }, "io.k8s.api.core.v1.CinderPersistentVolumeSource": { "description": "Represents a cinder volume resource in Openstack. 
A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", "type": "object", @@ -85481,10 +85445,6 @@ "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource" }, - "cephfs": { - "description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource" - }, "cinder": { "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource" diff --git a/manifest/provider/apply.go b/manifest/provider/apply.go index 22451dc5ed..ec95a8402f 100644 --- a/manifest/provider/apply.go +++ b/manifest/provider/apply.go @@ -540,16 +540,25 @@ func (s *RawProviderServer) ApplyResourceChange(ctx context.Context, req *tfprot err = rs.Delete(ctxDeadline, rname, metav1.DeleteOptions{}) if err != nil { - rn := types.NamespacedName{Namespace: rnamespace, Name: rname}.String() - resp.Diagnostics = append(resp.Diagnostics, - &tfprotov5.Diagnostic{ - Severity: tfprotov5.DiagnosticSeverityError, - Summary: fmt.Sprintf("Error deleting resource %s: %s", rn, err), - Detail: err.Error(), - }) + if apierrors.IsNotFound(err) { + s.logger.Trace("[ApplyResourceChange][Delete]", "Resource is already deleted") + + resp.Diagnostics = append(resp.Diagnostics, + &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityWarning, + Summary: fmt.Sprintf("Resource %q was already deleted", rname), + Detail: fmt.Sprintf("The resource %q was not found in the Kubernetes API. 
This may be due to the resource being already deleted.", rname), + }) + } else { + resp.Diagnostics = append(resp.Diagnostics, + &tfprotov5.Diagnostic{ + Severity: tfprotov5.DiagnosticSeverityError, + Summary: fmt.Sprintf("Error deleting resource %s: %s", rname, err), + Detail: err.Error(), + }) + } return resp, nil } - // wait for delete for { if time.Now().After(deadline) { diff --git a/manifest/test/acceptance/delete_not_found_test.go b/manifest/test/acceptance/delete_not_found_test.go new file mode 100644 index 0000000000..90f242d097 --- /dev/null +++ b/manifest/test/acceptance/delete_not_found_test.go @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build acceptance +// +build acceptance + +package acceptance + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-provider-kubernetes/manifest/provider" + "github.com/hashicorp/terraform-provider-kubernetes/manifest/test/helper/kubernetes" +) + +func TestKubernetesManifest_DeletionNotFound(t *testing.T) { + ctx := context.Background() + + reattachInfo, err := provider.ServeTest(ctx, hclog.Default(), t) + if err != nil { + t.Fatalf("Failed to create provider instance: %v", err) + } + + name := randName() + namespace := randName() + + tf := tfhelper.RequireNewWorkingDir(ctx, t) + tf.SetReattachInfo(ctx, reattachInfo) + + k8shelper.CreateNamespace(t, namespace) + t.Logf("Verifying if namespace %s exists", namespace) + k8shelper.AssertResourceExists(t, "v1", "namespaces", namespace) + + defer func() { + tf.Destroy(ctx) + tf.Close() + k8shelper.DeleteResource(t, namespace, kubernetes.NewGroupVersionResource("v1", "namespaces")) + k8shelper.AssertResourceDoesNotExist(t, "v1", "namespaces", namespace) + }() + + tfvars := TFVARS{ + "namespace": namespace, + "name": name, + } + + // Load the Terraform config that will create the ConfigMap + tfconfig := loadTerraformConfig(t, "DeleteNotFoundTest/resource.tf", tfvars) 
+ tf.SetConfig(ctx, tfconfig) + + t.Log("Applying Terraform configuration to create ConfigMap") + if err := tf.Apply(ctx); err != nil { + t.Fatalf("Terraform apply failed: %v", err) + } + + state, err := tf.State(ctx) + if err != nil { + t.Fatalf("Failed to retrieve Terraform state: %v", err) + } + t.Logf("Terraform state: %v", state) + + time.Sleep(2 * time.Second) + + t.Logf("Checking if ConfigMap %s in namespace %s was created", name, namespace) + k8shelper.AssertNamespacedResourceExists(t, "v1", "configmaps", namespace, name) + + // Simulating the deletion of the resource outside of Terraform + k8shelper.DeleteNamespacedResource(t, name, namespace, kubernetes.NewGroupVersionResource("v1", "configmaps")) + + // Running tf destroy in order to check if we are handling "404 Not Found" gracefully + tf.Destroy(ctx) + + // Ensuring that the ConfigMap no longer exists + k8shelper.AssertNamespacedResourceDoesNotExist(t, "v1", "configmaps", namespace, name) +} diff --git a/manifest/test/acceptance/testdata/DeleteNotFoundTest/resource.tf b/manifest/test/acceptance/testdata/DeleteNotFoundTest/resource.tf new file mode 100644 index 0000000000..8d875bbdd0 --- /dev/null +++ b/manifest/test/acceptance/testdata/DeleteNotFoundTest/resource.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +resource "kubernetes_manifest" "test" { + manifest = { + "apiVersion" = "v1" + "kind" = "ConfigMap" + "metadata" = { + "name" = var.name + "namespace" = var.namespace + } + "data" = { + "foo" = "bar" + } + } +} + diff --git a/manifest/test/acceptance/testdata/DeleteNotFoundTest/variables.tf b/manifest/test/acceptance/testdata/DeleteNotFoundTest/variables.tf new file mode 100644 index 0000000000..f526a6a4bf --- /dev/null +++ b/manifest/test/acceptance/testdata/DeleteNotFoundTest/variables.tf @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# These variable declarations are only used for interactive testing. 
+# The test code will template in different variable declarations with a default value when running the test. +# +# To set values for interactive runs, create a var-file and set values in it. +# If the name of the var-file ends in '.auto.tfvars' (e.g. myvalues.auto.tfvars) +# it will be automatically picked up and used by Terraform. +# +# DO NOT check in any files named *.auto.tfvars when making changes to tests. + +variable "name" { + type = string +} + +variable "namespace" { + type = string +} \ No newline at end of file