From 17cf9945a176c35a3112fd7d8a5f1a9c58e74749 Mon Sep 17 00:00:00 2001
From: Alexis MacAskill
Date: Mon, 9 Sep 2024 11:51:11 -0700
Subject: [PATCH] Add support for storage_pools flag on cluster/nodepool
 create and nodepool update (#11391)

---
 .../services/container/node_config.go.erb    |  19 +++
 .../resource_container_cluster_test.go.erb   | 124 +++++++++++++++
 .../resource_container_node_pool.go.erb      |  18 ++-
 .../resource_container_node_pool_test.go.erb | 143 ++++++++++++++++++
 .../docs/r/container_cluster.html.markdown   |   2 +
 5 files changed, 303 insertions(+), 3 deletions(-)

diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb
index 3165f7c8bb79..ebe0925870b7 100644
--- a/mmv1/third_party/terraform/services/container/node_config.go.erb
+++ b/mmv1/third_party/terraform/services/container/node_config.go.erb
@@ -461,6 +461,14 @@ func schemaNodeConfig() *schema.Schema {
 				Description: `The list of instance tags applied to all nodes.`,
 			},
 
+			"storage_pools": {
+				Type:        schema.TypeList,
+				ForceNew:    true,
+				Optional:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: `The list of Storage Pools where boot disks are provisioned.`,
+			},
+
 			"shielded_instance_config": {
 				Type:     schema.TypeList,
 				Optional: true,
@@ -1039,6 +1047,16 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
 		nc.Tags = tags
 	}
 
+	if v, ok := nodeConfig["storage_pools"]; ok {
+		spList := v.([]interface{})
+		storagePools := []string{}
+		for _, v := range spList {
+			if v != nil {
+				storagePools = append(storagePools, v.(string))
+			}
+		}
+		nc.StoragePools = storagePools
+	}
 	if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
 		conf := v.([]interface{})[0].(map[string]interface{})
 		nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{
@@ -1458,6 +1476,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte
 		"tags":                     c.Tags,
 		"preemptible":              c.Preemptible,
 		"secondary_boot_disks":     flattenSecondaryBootDisks(c.SecondaryBootDisks),
+		"storage_pools":            c.StoragePools,
 		"spot":                     c.Spot,
 		"min_cpu_platform":         c.MinCpuPlatform,
 		"shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig),
diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb
index 29e6d6f4d4fb..b5be556383a6 100644
--- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb
+++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb
@@ -11547,3 +11547,127 @@ resource "google_container_cluster" "primary" {
 }
 `, name, networkName, subnetworkName)
 }
+
+func TestAccContainerCluster_storagePoolsWithNodePool(t *testing.T) {
+	t.Parallel()
+
+	cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+	np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
+	networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+	location := envvar.GetTestZoneFromEnv()
+
+	storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
+	storagePoolResourceName, err := extractSPName(storagePoolNameURL)
+	if err != nil {
+		t.Fatalf("Failed to extract Storage Pool resource name from URL: %v", err)
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckContainerNodePoolDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerCluster_storagePoolsWithNodePool(cluster, location, networkName, subnetworkName, np, storagePoolResourceName),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_pool", "node_pool.0.node_config.0.storage_pools.0", storagePoolResourceName),
+				),
+			},
+			{
+				ResourceName:            "google_container_cluster.storage_pools_with_node_pool",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection"},
+			},
+		},
+	})
+}
+
+func testAccContainerCluster_storagePoolsWithNodePool(cluster, location, networkName, subnetworkName, np, storagePoolResourceName string) string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "storage_pools_with_node_pool" {
+  name                = "%s"
+  location            = "%s"
+  deletion_protection = false
+  network             = "%s"
+  subnetwork          = "%s"
+  node_pool {
+    name               = "%s"
+    initial_node_count = 1
+    node_config {
+      machine_type  = "c3-standard-4"
+      image_type    = "COS_CONTAINERD"
+      storage_pools = ["%s"]
+      disk_type     = "hyperdisk-balanced"
+    }
+  }
+}
+`, cluster, location, networkName, subnetworkName, np, storagePoolResourceName)
+}
+
+func TestAccContainerCluster_storagePoolsWithNodeConfig(t *testing.T) {
+	t.Parallel()
+
+	cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+	networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+	location := envvar.GetTestZoneFromEnv()
+
+	storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
+	storagePoolResourceName, err := extractSPName(storagePoolNameURL)
+	if err != nil {
+		t.Fatalf("Failed to extract Storage Pool resource name from URL: %v", err)
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckContainerNodePoolDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerCluster_storagePoolsWithNodeConfig(cluster, location, networkName, subnetworkName, storagePoolResourceName),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_config", "node_config.0.storage_pools.0", storagePoolResourceName),
+				),
+			},
+			{
+				ResourceName:            "google_container_cluster.storage_pools_with_node_config",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection"},
+			},
+		},
+	})
+}
+
+func testAccContainerCluster_storagePoolsWithNodeConfig(cluster, location, networkName, subnetworkName, storagePoolResourceName string) string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "storage_pools_with_node_config" {
+  name                = "%s"
+  location            = "%s"
+  initial_node_count  = 1
+  deletion_protection = false
+  network             = "%s"
+  subnetwork          = "%s"
+  node_config {
+    machine_type  = "c3-standard-4"
+    image_type    = "COS_CONTAINERD"
+    storage_pools = ["%s"]
+    disk_type     = "hyperdisk-balanced"
+  }
+}
+`, cluster, location, networkName, subnetworkName, storagePoolResourceName)
+}
+
+func extractSPName(url string) (string, error) {
+	re := regexp.MustCompile(`https://www\.googleapis\.com/compute/beta/(projects/[^"]+)`)
+	matches := re.FindStringSubmatch(url)
+
+	if len(matches) > 1 {
+		return matches[1], nil
+	}
+
+	return "", fmt.Errorf("no match found in storage pool URL %q", url)
+}
+
diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb
index 8f8c49e2ca0b..d9c82f4d5ab1 100644
--- a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb
+++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb
@@ -1453,13 +1453,25 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
 
 	if d.HasChange("node_config.0.disk_size_gb") ||
 		d.HasChange("node_config.0.disk_type") ||
-		d.HasChange("node_config.0.machine_type") {
+		d.HasChange("node_config.0.machine_type") ||
+		d.HasChange("node_config.0.storage_pools") {
 		req := &container.UpdateNodePoolRequest{
 			Name:        name,
 			DiskSizeGb:  int64(d.Get("node_config.0.disk_size_gb").(int)),
 			DiskType:    d.Get("node_config.0.disk_type").(string),
 			MachineType: d.Get("node_config.0.machine_type").(string),
 		}
+		if v, ok := d.GetOk("node_config.0.storage_pools"); ok {
+			spList := v.([]interface{})
+			storagePools := []string{}
+			for _, v := range spList {
+				if v != nil {
+					storagePools = append(storagePools, v.(string))
+				}
+			}
+			req.StoragePools = storagePools
+		}
+
 		updateF := func() error {
 			clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
 			if config.UserProjectOverride {
@@ -1474,14 +1486,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
 			return ContainerOperationWait(config, op,
 				nodePoolInfo.project,
 				nodePoolInfo.location,
-				"updating GKE node pool disk_size_gb/disk_type/machine_type", userAgent,
+				"updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools", userAgent,
 				timeout)
 		}
 
 		if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
 			return err
 		}
-		log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type for Node Pool %s", d.Id())
+		log.Printf("[INFO] Updated disk_size_gb/disk_type/machine_type/storage_pools for Node Pool %s", d.Id())
 	}
 
 	if d.HasChange(prefix + "node_config.0.taint") {
diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb
index 794e0c90f1e7..5adcfc374cc3 100644
--- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb
+++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb
@@ -4873,3 +4873,146 @@ resource "google_container_node_pool" "np" {
 }
 `, cluster, np)
 }
+
+func TestAccContainerNodePool_storagePools(t *testing.T) {
+	t.Parallel()
+
+	cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+	np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
+	networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+	location := envvar.GetTestZoneFromEnv()
+
+	storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
+	storagePoolResourceName, err := extractSPName(storagePoolNameURL)
+	if err != nil {
+		t.Fatalf("Failed to extract Storage Pool resource name from URL: %v", err)
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckContainerNodePoolDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.storage_pools.0", storagePoolResourceName),
+				),
+			},
+			{
+				ResourceName:            "google_container_node_pool.np",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"deletion_protection"},
+			},
+		},
+	})
+}
+
+func testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
+	return fmt.Sprintf(`
+resource "google_container_cluster" "cluster" {
+  name                = "%[1]s"
+  location            = "%[6]s"
+  initial_node_count  = 1
+  deletion_protection = false
+  network             = "%[3]s"
+  subnetwork          = "%[4]s"
+}
+
+resource "google_container_node_pool" "np" {
+  name               = "%[2]s"
+  location           = "%[6]s"
+  cluster            = google_container_cluster.cluster.name
+  initial_node_count = 1
+
+  node_config {
+    machine_type  = "c3-standard-4"
+    image_type    = "COS_CONTAINERD"
+    storage_pools = ["%[5]s"]
+    disk_type     = "hyperdisk-balanced"
+  }
+}
+`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
+}
+
+func TestAccContainerNodePool_withMachineDiskStoragePoolsUpdate(t *testing.T) {
+	t.Parallel()
+
+	cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
+	nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
+	networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
+	location := envvar.GetTestZoneFromEnv()
+
+	storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
+	storagePoolResourceName, err := extractSPName(storagePoolNameURL)
+	if err != nil {
+		t.Fatalf("Failed to extract Storage Pool resource name from URL: %v", err)
+	}
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckContainerNodePoolDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName),
+			},
+			{
+				ResourceName:      "google_container_node_pool.np",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, nodePool, networkName, subnetworkName, storagePoolResourceName, location),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.storage_pools.0", storagePoolResourceName),
+				),
+			},
+			{
+				ResourceName:      "google_container_node_pool.np",
+				ImportState:       true,
+				ImportStateVerify: true,
+				// autoscaling.# = 0 is equivalent to no autoscaling at all,
+				// but will still cause an import diff
+				ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint", "deletion_protection"},
+			},
+		},
+	})
+}
+
+func testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
+	return fmt.Sprintf(`
+provider "google" {
+  alias                 = "user-project-override"
+  user_project_override = true
+}
+resource "google_container_cluster" "cluster" {
+  provider            = google.user-project-override
+  name                = "%[1]s"
+ location = "%[6]s" + initial_node_count = 3 + deletion_protection = false + network = "%[3]s" + subnetwork = "%[4]s" +} + +resource "google_container_node_pool" "np" { + provider = google.user-project-override + name = "%[1]s" + location = "%[6]s" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + machine_type = "c3-standard-4" + disk_size_gb = 50 + disk_type = "hyperdisk-balanced" + storage_pools = ["%[5]s"] + } +} +`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location) +} + diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index a6e83f1529c5..5f3e93f2fdac 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -941,6 +941,8 @@ gvnic { * `shielded_instance_config` - (Optional) Shielded Instance options. Structure is [documented below](#nested_shielded_instance_config). +* `storage_pools` - (Optional) The list of Storage Pools where boot disks are provisioned. + * `tags` - (Optional) The list of instance tags applied to all nodes. Tags are used to identify valid sources or targets for network firewalls.