From 94f7f22a34bb6df9ceeb98e01260bdc1d6244247 Mon Sep 17 00:00:00 2001 From: Alexis MacAskill Date: Wed, 7 Aug 2024 01:50:05 +0000 Subject: [PATCH] Add terraform support for storage_pools on cluster/nodepool create, and nodepool update --- .../services/container/node_config.go.erb | 19 ++ .../resource_container_cluster_test.go.erb | 189 ++++++++++++++++++ .../resource_container_node_pool.go.erb | 18 +- .../resource_container_node_pool_test.go.erb | 147 +++++++++++++- .../docs/r/container_cluster.html.markdown | 2 + 5 files changed, 371 insertions(+), 4 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index 45c732d0fa41..695aa3150c5f 100644 --- a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -456,6 +456,14 @@ func schemaNodeConfig() *schema.Schema { Description: `The list of instance tags applied to all nodes.`, }, + "storage_pools": { + Type: schema.TypeList, + ForceNew: true, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of Storage Pools where boot disks are provisioned.`, + }, + "shielded_instance_config": { Type: schema.TypeList, Optional: true, @@ -1011,6 +1019,16 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { nc.Tags = tags } + if v, ok := nodeConfig["storage_pools"]; ok { + spList := v.([]interface{}) + storagePools := []string{} + for _, v := range spList { + if v != nil { + storagePools = append(storagePools, v.(string)) + } + } + nc.StoragePools = storagePools + } if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 { conf := v.([]interface{})[0].(map[string]interface{}) nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{ @@ -1413,6 +1431,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte "tags": c.Tags, "preemptible": c.Preemptible, "secondary_boot_disks": flattenSecondaryBootDisks(c.SecondaryBootDisks), + "storage_pools": c.StoragePools, "spot": c.Spot, "min_cpu_platform": c.MinCpuPlatform, "shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig), diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index 3fbe2fc43b1d..d663c18a9515 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -6,12 +6,16 @@ import ( "fmt" "testing" "regexp" + "net/http" + "time" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/services/container" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" ) func TestAccContainerCluster_basic(t *testing.T) { @@ -11188,3 +11192,188 @@ resource "google_container_cluster" "primary" { } `, secretID, clusterName, networkName, subnetworkName) } + + +func TestAccContainerCluster_storagePoolsWithNodePool(t *testing.T) { + t.Parallel() + + cluster := 
fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + location := envvar.GetTestZoneFromEnv() + storagePoolName := fmt.Sprintf("tf-test-storage-pool-node-pool") + storagePoolResourceName := fmt.Sprintf("projects/%s/zones/%s/storagePools/%s", pid, location, storagePoolName) + t.Cleanup(func() { + cleanupTestingStoragePool(t, storagePoolName) + }) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + PreConfig: setupTestingStoragePool_HyperdiskBalanced(t, storagePoolName), + Config: testAccContainerCluster_storagePoolsWithNodePool(cluster, np, networkName, subnetworkName, storagePoolResourceName, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_pool", "node_pool.0.node_config.0.storage_pools.0", storagePoolResourceName), + ), + }, + { + ResourceName: "google_container_cluster.storage_pools_with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_storagePoolsWithNodePool(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "storage_pools_with_node_pool" { + name = "%[1]s" + location = "%[6]s" + deletion_protection = false + network = "%[3]s" + subnetwork = "%[4]s" + node_pool { + name = "%[2]s" + initial_node_count = 1 + node_config { + machine_type = "c3-standard-4" + image_type = "COS_CONTAINERD" + storage_pools = ["%[5]s"] + disk_type = "hyperdisk-balanced" + } + } +} +`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location) +} + +func TestAccContainerCluster_storagePoolsWithNodeConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + location := envvar.GetTestZoneFromEnv() + storagePoolName := fmt.Sprintf("tf-test-storage-pool-node-config") + storagePoolResourceName := fmt.Sprintf("projects/%s/zones/%s/storagePools/%s", pid, location, storagePoolName) + t.Cleanup(func() { + cleanupTestingStoragePool(t, storagePoolName) + }) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + PreConfig: setupTestingStoragePool_HyperdiskBalanced(t, storagePoolName), + Config: testAccContainerCluster_storagePoolsWithNodeConfig(cluster, np, networkName, subnetworkName, storagePoolResourceName, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_config", "node_config.0.storage_pools.0", storagePoolResourceName), + ), + }, + { + 
ResourceName: "google_container_cluster.storage_pools_with_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_storagePoolsWithNodeConfig(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "storage_pools_with_node_config" { + name = "%[1]s" + location = "%[6]s" + initial_node_count = 1 + deletion_protection = false + network = "%[3]s" + subnetwork = "%[4]s" + node_config { + machine_type = "c3-standard-4" + image_type = "COS_CONTAINERD" + storage_pools = ["%[5]s"] + disk_type = "hyperdisk-balanced" + } +} +`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location) +} + +func setupTestingStoragePool_HyperdiskBalanced(t *testing.T, storagePoolName string) func() { + return func() { + config := acctest.GoogleProviderConfig(t) + headers := make(http.Header) + project := envvar.GetTestProjectFromEnv() + zone := envvar.GetTestZoneFromEnv() + url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools", config.ComputeBasePath, project, zone) + storagePoolTypeUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", project, zone) + defaultTimeout := 20 * time.Minute + obj := make(map[string]interface{}) + obj["name"] = storagePoolName + obj["poolProvisionedCapacityGb"] = 10240 + obj["poolProvisionedIops"] = 10000 + obj["poolProvisionedThroughput"] = 1024 + obj["storagePoolType"] = storagePoolTypeUrl + obj["capacityProvisioningType"] = "ADVANCED" + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: config.UserAgent, + Body: obj, + Timeout: defaultTimeout, + Headers: headers, + }) + if err != nil { + t.Errorf("Error creating StoragePool: %s", err) + // Clean up storage pool if it fails due to already exists. 
+ cleanupTestingStoragePool(t, storagePoolName) + } + + err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Creating StoragePool", config.UserAgent, defaultTimeout) + if err != nil { + t.Errorf("Error waiting to create StoragePool: %s", err) + } + } +} + +func cleanupTestingStoragePool(t *testing.T, storagePoolName string) { + config := acctest.GoogleProviderConfig(t) + headers := make(http.Header) + project := envvar.GetTestProjectFromEnv() + zone := envvar.GetTestZoneFromEnv() + url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools/%s", config.ComputeBasePath, project, zone, storagePoolName) + defaultTimeout := 20 * time.Minute + var obj map[string]interface{} + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: config.UserAgent, + Body: obj, + Timeout: defaultTimeout, + Headers: headers, + }) + if err != nil { + t.Errorf("Error deleting StoragePool: %s", err) + } + + err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Deleting StoragePool", config.UserAgent, defaultTimeout) + if err != nil { + t.Errorf("Error waiting to delete StoragePool: %s", err) + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb index 8f8c49e2ca0b..d9c82f4d5ab1 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb @@ -1453,13 +1453,25 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node if d.HasChange("node_config.0.disk_size_gb") || d.HasChange("node_config.0.disk_type") || - d.HasChange("node_config.0.machine_type") { + d.HasChange("node_config.0.machine_type") || + d.HasChange("node_config.0.storage_pools") { req := &container.UpdateNodePoolRequest{ Name: name, DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)), DiskType: d.Get("node_config.0.disk_type").(string), MachineType: d.Get("node_config.0.machine_type").(string), } + if v, ok := d.GetOk("node_config.0.storage_pools"); ok { + spList := v.([]interface{}) + storagePools := []string{} + for _, v := range spList { + if v != nil { + storagePools = append(storagePools, v.(string)) + } + } + req.StoragePools = storagePools + } + updateF := func() error { clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) if config.UserProjectOverride { @@ -1474,14 +1486,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, - "updating GKE node pool disk_size_gb/disk_type/machine_type", userAgent, + "updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools", userAgent, timeout) } if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } - log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type for Node Pool %s", d.Id()) + log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type/storage_pools for Node Pool %s", d.Id()) } if d.HasChange(prefix + "node_config.0.taint") { diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb 
b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index 040375223ded..b53b4e93850b 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -4929,4 +4929,149 @@ resource "google_container_node_pool" "np" { } } `, cluster, np) -} \ No newline at end of file +} + +func TestAccContainerNodePool_storagePools(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + location := envvar.GetTestZoneFromEnv() + storagePoolName := fmt.Sprintf("tf-test-storage-pool-node-pool") + storagePoolResourceName := fmt.Sprintf("projects/%s/zones/%s/storagePools/%s", pid, location, storagePoolName) + t.Cleanup(func() { + cleanupTestingStoragePool(t, storagePoolName) + }) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + PreConfig: setupTestingStoragePool_HyperdiskBalanced(t, storagePoolName), + Config: testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "node_pool.0.node_config.0.storage_pools.0", storagePoolResourceName), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%[1]s" + location = "%[6]s" + initial_node_count = 1 + deletion_protection = false + network = "%[3]s" + subnetwork = "%[4]s" +} + +resource "google_container_node_pool" "np" { + name = "%[2]s" + location = "%[6]s" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "c3-standard-4" + image_type = "COS_CONTAINERD" + storage_pools = ["%[5]s"] + disk_type = "hyperdisk-balanced" + } +} +`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location) +} + + +func TestAccContainerNodePool_withMachineDiskStoragePoolsUpdate(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + location := envvar.GetTestZoneFromEnv() + storagePoolName := fmt.Sprintf("tf-test-storage-pool-update") + storagePoolResourceName := fmt.Sprintf("projects/%s/zones/%s/storagePools/%s", pid, location, storagePoolName) + t.Cleanup(func() { + cleanupTestingStoragePool(t, storagePoolName) + }) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + PreConfig: setupTestingStoragePool_HyperdiskBalanced(t, storagePoolName), + Config: testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, nodePool, networkName, subnetworkName, storagePoolResourceName, location), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.storage_pools.0", storagePoolResourceName), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint", "deletion_protection"}, + }, + }, + }) +} + +func testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_container_cluster" "cluster" { + provider = google.user-project-override + name = "%[1]s" + location = "%[6]s" + initial_node_count = 3 + deletion_protection = false + network = "%[3]s" + subnetwork = "%[4]s" +} + +resource "google_container_node_pool" "np" { + provider = google.user-project-override + name = "%[1]s" + location = "%[6]s" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + machine_type = "c3-standard-4" + disk_size_gb = 50 + disk_type = "hyperdisk-balanced" + storage_pools = ["%[5]s"] + } +} +`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location) +} diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 73120a1c1438..ec7903529785 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -931,6 +931,8 @@ gvnic { * `shielded_instance_config` - (Optional) Shielded Instance options. Structure is [documented below](#nested_shielded_instance_config). +* `storage_pools` - (Optional) The list of Storage Pools where boot disks are provisioned. + * `tags` - (Optional) The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls.
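
For context, a minimal usage sketch of the new field, assuming an existing
Hyperdisk Balanced storage pool in the same project and zone as the node pool;
the project, zone, cluster, and pool names below are placeholders. The entries
in `storage_pools` are full storage pool resource names, and `disk_type` must
be a Hyperdisk type matching the pool:

resource "google_container_node_pool" "np" {
  name               = "example-np"
  location           = "us-central1-a"
  cluster            = google_container_cluster.cluster.name
  initial_node_count = 1

  node_config {
    machine_type = "c3-standard-4"
    disk_type    = "hyperdisk-balanced"
    # Boot disks for this pool's nodes are provisioned out of this storage pool.
    storage_pools = ["projects/my-project/zones/us-central1-a/storagePools/my-storage-pool"]
  }
}

Changing `storage_pools` on an existing google_container_node_pool flows
through the UpdateNodePoolRequest path added above; the same field inside a
google_container_cluster node_config is set at creation time.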