Add support for storage_pools flag on cluster/nodepool create, and nodepool u…
amacaskill authored and abd-goog committed Sep 23, 2024
1 parent 7826387 commit 17cf994
Showing 5 changed files with 303 additions and 3 deletions.
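The new `storage_pools` field on `node_config` takes a list of full storage pool resource paths and is wired through cluster create, node pool create, and node pool update. A minimal cluster-level sketch based on the acceptance test configurations in this commit (the project, zone, and pool path are placeholders; the tests pair a `hyperdisk-balanced` pool with a matching `disk_type`):

```hcl
resource "google_container_cluster" "example" {
  name                = "example-cluster"
  location            = "us-central1-a"
  initial_node_count  = 1
  deletion_protection = false

  node_config {
    machine_type = "c3-standard-4"
    image_type   = "COS_CONTAINERD"
    disk_type    = "hyperdisk-balanced"
    # Placeholder path; the storage pool must already exist in the node's zone.
    storage_pools = ["projects/my-project/zones/us-central1-a/storagePools/my-pool"]
  }
}
```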
19 changes: 19 additions & 0 deletions mmv1/third_party/terraform/services/container/node_config.go.erb
@@ -461,6 +461,14 @@ func schemaNodeConfig() *schema.Schema {
				Description: `The list of instance tags applied to all nodes.`,
			},

			"storage_pools": {
				Type:        schema.TypeList,
				ForceNew:    true,
				Optional:    true,
				Elem:        &schema.Schema{Type: schema.TypeString},
				Description: `The list of Storage Pools where boot disks are provisioned.`,
			},

			"shielded_instance_config": {
				Type:     schema.TypeList,
				Optional: true,
@@ -1039,6 +1047,16 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
		nc.Tags = tags
	}

	if v, ok := nodeConfig["storage_pools"]; ok {
		spList := v.([]interface{})
		storagePools := []string{}
		for _, v := range spList {
			if v != nil {
				storagePools = append(storagePools, v.(string))
			}
		}
		nc.StoragePools = storagePools
	}
	if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
		conf := v.([]interface{})[0].(map[string]interface{})
		nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{
@@ -1458,6 +1476,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]interface{} {
"tags": c.Tags,
"preemptible": c.Preemptible,
"secondary_boot_disks": flattenSecondaryBootDisks(c.SecondaryBootDisks),
"storage_pools": c.StoragePools,
"spot": c.Spot,
"min_cpu_platform": c.MinCpuPlatform,
"shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig),
@@ -11547,3 +11547,127 @@ resource "google_container_cluster" "primary" {
}
`, name, networkName, subnetworkName)
}

func TestAccContainerCluster_storagePoolsWithNodePool(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
location := envvar.GetTestZoneFromEnv()

storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
if err != nil {
	t.Fatalf("failed to extract Storage Pool resource name from URL: %v", err)
}

acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_storagePoolsWithNodePool(cluster, location, networkName, subnetworkName, np, storagePoolResourceName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_pool", "node_pool.0.node_config.0.storage_pools.0", storagePoolResourceName),
),
},
{
ResourceName: "google_container_cluster.storage_pools_with_node_pool",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection"},
},
},
})
}

func testAccContainerCluster_storagePoolsWithNodePool(cluster, location, networkName, subnetworkName, np, storagePoolResourceName string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "storage_pools_with_node_pool" {
name = "%s"
location = "%s"
deletion_protection = false
network = "%s"
subnetwork = "%s"
node_pool {
name = "%s"
initial_node_count = 1
node_config {
machine_type = "c3-standard-4"
image_type = "COS_CONTAINERD"
storage_pools = ["%s"]
disk_type = "hyperdisk-balanced"
}
}
}
`, cluster, location, networkName, subnetworkName, np, storagePoolResourceName)
}

func TestAccContainerCluster_storagePoolsWithNodeConfig(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
location := envvar.GetTestZoneFromEnv()

storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
if err != nil {
	t.Fatalf("failed to extract Storage Pool resource name from URL: %v", err)
}

acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_storagePoolsWithNodeConfig(cluster, location, networkName, subnetworkName, storagePoolResourceName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_config", "node_config.0.storage_pools.0", storagePoolResourceName),
),
},
{
ResourceName: "google_container_cluster.storage_pools_with_node_config",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection"},
},
},
})
}

func testAccContainerCluster_storagePoolsWithNodeConfig(cluster, location, networkName, subnetworkName, storagePoolResourceName string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "storage_pools_with_node_config" {
name = "%s"
location = "%s"
initial_node_count = 1
deletion_protection = false
network = "%s"
subnetwork = "%s"
node_config {
machine_type = "c3-standard-4"
image_type = "COS_CONTAINERD"
storage_pools = ["%s"]
disk_type = "hyperdisk-balanced"
}
}
`, cluster, location, networkName, subnetworkName, storagePoolResourceName)
}

func extractSPName(url string) (string, error) {
	re := regexp.MustCompile(`https://www\.googleapis\.com/compute/beta/(projects/[^"]+)`)
	matches := re.FindStringSubmatch(url)

	if len(matches) > 1 {
		return matches[1], nil
	}
	return "", fmt.Errorf("no match found")
}

@@ -1453,13 +1453,25 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation,

if d.HasChange("node_config.0.disk_size_gb") ||
d.HasChange("node_config.0.disk_type") ||
d.HasChange("node_config.0.machine_type") {
d.HasChange("node_config.0.machine_type") ||
d.HasChange("node_config.0.storage_pools") {
req := &container.UpdateNodePoolRequest{
Name: name,
DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)),
DiskType: d.Get("node_config.0.disk_type").(string),
MachineType: d.Get("node_config.0.machine_type").(string),
}
if v, ok := d.GetOk("node_config.0.storage_pools"); ok {
spList := v.([]interface{})
storagePools := []string{}
for _, v := range spList {
if v != nil {
storagePools = append(storagePools, v.(string))
}
}
req.StoragePools = storagePools
}

updateF := func() error {
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
if config.UserProjectOverride {
@@ -1474,14 +1486,14 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation,
			return ContainerOperationWait(config, op,
				nodePoolInfo.project,
				nodePoolInfo.location,
				"updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools", userAgent,
				timeout)
		}

		if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
			return err
		}
		log.Printf("[INFO] Updated disk_size_gb/disk_type/machine_type/storage_pools for Node Pool %s", d.Id())
	}

	if d.HasChange(prefix + "node_config.0.taint") {
@@ -4873,3 +4873,146 @@ resource "google_container_node_pool" "np" {
}
`, cluster, np)
}

func TestAccContainerNodePool_storagePools(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
location := envvar.GetTestZoneFromEnv()

storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
if err != nil {
	t.Fatalf("failed to extract Storage Pool resource name from URL: %v", err)
}

acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.storage_pools.0", storagePoolResourceName),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection"},
},
},
})
}

func testAccContainerNodePool_storagePools(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%[1]s"
location = "%[6]s"
initial_node_count = 1
deletion_protection = false
network = "%[3]s"
subnetwork = "%[4]s"
}

resource "google_container_node_pool" "np" {
name = "%[2]s"
location = "%[6]s"
cluster = google_container_cluster.cluster.name
initial_node_count = 1

node_config {
machine_type = "c3-standard-4"
image_type = "COS_CONTAINERD"
storage_pools = ["%[5]s"]
disk_type = "hyperdisk-balanced"
}
}
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
}


func TestAccContainerNodePool_withMachineDiskStoragePoolsUpdate(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
location := envvar.GetTestZoneFromEnv()

storagePoolNameURL := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced")
storagePoolResourceName, err := extractSPName(storagePoolNameURL)
if err != nil {
	t.Fatalf("failed to extract Storage Pool resource name from URL: %v", err)
}
acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
},
{
Config: testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, nodePool, networkName, subnetworkName, storagePoolResourceName, location),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.storage_pools.0", storagePoolResourceName),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint", "deletion_protection"},
},
},
})
}

func testAccContainerNodePool_withDiskMachineAndStoragePoolUpdate(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
return fmt.Sprintf(`
provider "google" {
alias = "user-project-override"
user_project_override = true
}
resource "google_container_cluster" "cluster" {
provider = google.user-project-override
name = "%[1]s"
location = "%[6]s"
initial_node_count = 3
deletion_protection = false
network = "%[3]s"
subnetwork = "%[4]s"
}

resource "google_container_node_pool" "np" {
provider = google.user-project-override
name = "%[1]s"
location = "%[6]s"
cluster = google_container_cluster.cluster.name
initial_node_count = 2

node_config {
machine_type = "c3-standard-4"
disk_size_gb = 50
disk_type = "hyperdisk-balanced"
storage_pools = ["%[5]s"]
}
}
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
}

@@ -941,6 +941,8 @@ gvnic {

* `shielded_instance_config` - (Optional) Shielded Instance options. Structure is [documented below](#nested_shielded_instance_config).

* `storage_pools` - (Optional) The list of Storage Pools where boot disks are provisioned. See the example after this list.

* `tags` - (Optional) The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls.
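
A minimal sketch of the attribute on a standalone node pool, mirroring the acceptance tests in this commit (the project, zone, and pool path are placeholders; the referenced storage pool must already exist and match the node's `disk_type`):

```hcl
resource "google_container_node_pool" "np" {
  name       = "example-pool"
  location   = "us-central1-a"
  cluster    = google_container_cluster.cluster.name
  node_count = 1

  node_config {
    machine_type = "c3-standard-4"
    image_type   = "COS_CONTAINERD"
    disk_type    = "hyperdisk-balanced"
    # Placeholder path referencing an existing hyperdisk-balanced storage pool.
    storage_pools = ["projects/my-project/zones/us-central1-a/storagePools/my-pool"]
  }
}
```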

