From 1ac1d70d002941296d3d2138cc9b62c7ccaf016d Mon Sep 17 00:00:00 2001 From: Pawel Jasinski <56267784+pawelJas@users.noreply.github.com> Date: Thu, 29 Aug 2024 18:29:13 +0200 Subject: [PATCH 01/60] Promote Server Tls Policy to V1 (#11538) Co-authored-by: Stephen Lewis (Burrows) --- mmv1/products/networksecurity/ServerTlsPolicy.yaml | 6 ------ ...network_security_server_tls_policy_advanced.tf.erb | 1 - .../network_security_server_tls_policy_basic.tf.erb | 11 ----------- .../network_security_server_tls_policy_mtls.tf.erb | 3 --- ...work_security_server_tls_policy_server_cert.tf.erb | 1 - 5 files changed, 22 deletions(-) diff --git a/mmv1/products/networksecurity/ServerTlsPolicy.yaml b/mmv1/products/networksecurity/ServerTlsPolicy.yaml index fda172e87831..1163fc19f419 100644 --- a/mmv1/products/networksecurity/ServerTlsPolicy.yaml +++ b/mmv1/products/networksecurity/ServerTlsPolicy.yaml @@ -15,7 +15,6 @@ name: 'ServerTlsPolicy' base_url: 'projects/{{project}}/locations/{{location}}/serverTlsPolicies' create_url: 'projects/{{project}}/locations/{{location}}/serverTlsPolicies?serverTlsPolicyId={{name}}' -min_version: beta update_verb: :PATCH update_mask: true description: | @@ -47,26 +46,21 @@ import_format: ['projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}'] examples: - !ruby/object:Provider::Terraform::Examples - min_version: beta name: 'network_security_server_tls_policy_basic' - skip_vcr: true primary_resource_id: 'default' vars: resource_name: 'my-server-tls-policy' - !ruby/object:Provider::Terraform::Examples - min_version: beta name: 'network_security_server_tls_policy_advanced' primary_resource_id: 'default' vars: resource_name: 'my-server-tls-policy' - !ruby/object:Provider::Terraform::Examples - min_version: beta name: 'network_security_server_tls_policy_server_cert' primary_resource_id: 'default' vars: resource_name: 'my-server-tls-policy' - !ruby/object:Provider::Terraform::Examples - min_version: beta name: 
'network_security_server_tls_policy_mtls' primary_resource_id: 'default' vars: diff --git a/mmv1/templates/terraform/examples/network_security_server_tls_policy_advanced.tf.erb b/mmv1/templates/terraform/examples/network_security_server_tls_policy_advanced.tf.erb index a507d5fbb23a..d71f5cd3cfe2 100644 --- a/mmv1/templates/terraform/examples/network_security_server_tls_policy_advanced.tf.erb +++ b/mmv1/templates/terraform/examples/network_security_server_tls_policy_advanced.tf.erb @@ -1,5 +1,4 @@ resource "google_network_security_server_tls_policy" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta name = "<%= ctx[:vars]['resource_name'] %>" labels = { foo = "bar" diff --git a/mmv1/templates/terraform/examples/network_security_server_tls_policy_basic.tf.erb b/mmv1/templates/terraform/examples/network_security_server_tls_policy_basic.tf.erb index c7bb83171797..f0ff53b2019d 100644 --- a/mmv1/templates/terraform/examples/network_security_server_tls_policy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/network_security_server_tls_policy_basic.tf.erb @@ -1,5 +1,4 @@ resource "google_network_security_server_tls_policy" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta name = "<%= ctx[:vars]['resource_name'] %>" labels = { foo = "bar" @@ -17,16 +16,6 @@ resource "google_network_security_server_tls_policy" "<%= ctx[:primary_resource_ target_uri = "unix:mypath" } } - client_validation_ca { - grpc_endpoint { - target_uri = "unix:abc/mypath" - } - } - client_validation_ca { - certificate_provider_instance { - plugin_instance = "google_cloud_private_spiffe" - } - } } } diff --git a/mmv1/templates/terraform/examples/network_security_server_tls_policy_mtls.tf.erb b/mmv1/templates/terraform/examples/network_security_server_tls_policy_mtls.tf.erb index 12073fee43c1..a0002be15477 100644 --- a/mmv1/templates/terraform/examples/network_security_server_tls_policy_mtls.tf.erb +++ 
b/mmv1/templates/terraform/examples/network_security_server_tls_policy_mtls.tf.erb @@ -1,9 +1,7 @@ data "google_project" "project" { - provider = google-beta } resource "google_network_security_server_tls_policy" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta name = "<%= ctx[:vars]['resource_name'] %>" description = "my description" @@ -21,7 +19,6 @@ resource "google_network_security_server_tls_policy" "<%= ctx[:primary_resource_ } resource "google_certificate_manager_trust_config" "default" { - provider = google-beta name = "<%= ctx[:vars]['trust_config_name'] %>" description = "sample trust config description" location = "global" diff --git a/mmv1/templates/terraform/examples/network_security_server_tls_policy_server_cert.tf.erb b/mmv1/templates/terraform/examples/network_security_server_tls_policy_server_cert.tf.erb index 39fa7127da06..0e4d74441396 100644 --- a/mmv1/templates/terraform/examples/network_security_server_tls_policy_server_cert.tf.erb +++ b/mmv1/templates/terraform/examples/network_security_server_tls_policy_server_cert.tf.erb @@ -1,5 +1,4 @@ resource "google_network_security_server_tls_policy" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta name = "<%= ctx[:vars]['resource_name'] %>" labels = { foo = "bar" From 06201b26affa53c15cca0e9ce5a9405821bbdbb3 Mon Sep 17 00:00:00 2001 From: rlapin-pl <114071972+rlapin-pl@users.noreply.github.com> Date: Thu, 29 Aug 2024 19:21:49 +0200 Subject: [PATCH 02/60] Composer 3 documentation update. 
Change Private IP networking field name (#11566) Co-authored-by: rlapin-pl --- .../terraform/website/docs/r/composer_environment.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown b/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown index 65a8ef1d2bc1..254316ed9053 100644 --- a/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown @@ -319,7 +319,7 @@ resource "google_composer_environment" "example" { config { - enable_private_ip_environment = true + enable_private_environment = true # ... other configuration parameters } From e8221b85ce24f1b0f3bb8641a91d12a9eab63e3e Mon Sep 17 00:00:00 2001 From: chentaozgoogle Date: Thu, 29 Aug 2024 17:47:32 -0400 Subject: [PATCH 03/60] Support boot disk interface (#11276) --- ...data_source_google_compute_instance.go.erb | 2 +- .../compute/resource_compute_disk_test.go.erb | 2 +- .../compute/resource_compute_instance.go.erb | 21 ++++- ...compute_instance_from_template_test.go.erb | 80 +++++++++++++++++++ .../resource_compute_instance_test.go.erb | 76 ++++++++++++++++++ 5 files changed, 178 insertions(+), 3 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb index 7b0734cf6455..ca1d87890eb1 100644 --- a/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb @@ -59,7 +59,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err != nil { return err } - if err := d.Set("network_interface", networkInterfaces); err != nil { + if err := d.Set("network_interface", networkInterfaces); err != nil { 
return err } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb index 93da05cbc6ec..9a9633c1c1b5 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb @@ -1762,7 +1762,7 @@ func TestAccComputeDisk_accessModeSpecified(t *testing.T) { } func testAccComputeDisk_accessModeSpecified(diskName, accessMode string) string { - return fmt.Sprintf(` + return fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "%s" type = "hyperdisk-ml" diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 68f77eaeb302..9d3c847b5931 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -206,6 +206,13 @@ func ResourceComputeInstance() *schema.Resource { Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.`, }, + "interface": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false), + Description: `The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.)`, + }, + "kms_key_self_link": { Type: schema.TypeString, Optional: true, @@ -1684,6 +1691,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error di["disk_encryption_key_sha256"] = key.Sha256 } } + // We want the disks to remain in the order we set in the config, so if a disk // is present in the config, make sure it's at the correct index. 
Otherwise, append it. if inConfig { @@ -2723,6 +2731,10 @@ func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceDat disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok && v != "" { + disk.Interface = v.(string) + } + keyValue, keyOk := diskConfig["disk_encryption_key_raw"] if keyOk { if keyValue != "" { @@ -2924,6 +2936,10 @@ func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, projec disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok { + disk.Interface = v.(string) + } + if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { if v != "" { disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ @@ -3024,6 +3040,9 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config // originally specified to avoid diffs. "disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), } + if _,ok := d.GetOk("boot_disk.0.interface"); ok { + result["interface"] = disk.Interface + } diskDetails, err := getDisk(disk.Source, d, config) if err != nil { @@ -3196,4 +3215,4 @@ func CheckForCommonAliasIp(old, new *compute.NetworkInterface) []*compute.AliasI } } return resultAliasIpRanges -} \ No newline at end of file +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb index b9748a2a7702..69e7383c0100 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb @@ -1777,3 +1777,83 @@ resource "google_compute_instance_from_template" "inst2" { } `, templateDisk, image, template, confidentialInstanceType, instance, template2, confidentialInstanceType, instance2) } + +func TestAccComputeInstanceFromTemplateWithOverride_interface(t *testing.T) 
{ + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplateWithOverride_interface(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "boot_disk.0.interface", "SCSI"), + ), + }, + }, + }) +} + +func testAccComputeInstanceFromTemplateWithOverride_interface(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobarboot" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "foobarattach" { + name = "%s" + size = 100 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobarboot.name + auto_delete = false + boot = true + } + + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + attached_disk { + source = google_compute_disk.foobarattach.name + } + // Overrides + boot_disk { + interface = "SCSI" + source = 
google_compute_disk.foobarboot.name + } +} +`, template, instance, template, instance) +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index 96cfb77c0eee..ee355c7f988d 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -10300,3 +10300,79 @@ resource "google_compute_instance" "foobar" { } `, instanceName, zone, storagePoolUrl) } + +func TestAccComputeInstance_bootAndAttachedDisk_interface(t *testing.T) { + t.Parallel() + + instanceName1 := fmt.Sprintf("tf-test-vm1-%s", acctest.RandString(t, 10)) + diskName1 := fmt.Sprintf("tf-test-disk1-%s", acctest.RandString(t, 10)) + instanceName2 := fmt.Sprintf("tf-test-vm2-%s", acctest.RandString(t, 10)) + diskName2 := fmt.Sprintf("tf-test-disk2-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_bootAndAttachedDisk_interface(instanceName1, diskName1, envvar.GetTestZoneFromEnv(), "h3-standard-88", "NVME", false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance.foobar", "boot_disk.0.interface", "NVME"), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "machine_type", "h3-standard-88"), + ), + }, + //computeInstanceImportStep("us-central1-a", instanceName1, []string{"desired_status","allow_stopping_for_update"}), + { + Config: testAccComputeInstance_bootAndAttachedDisk_interface(instanceName2, diskName2, envvar.GetTestZoneFromEnv(), "n2-standard-8", "SCSI", true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance.foobar", "boot_disk.0.interface", 
"SCSI"), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "machine_type", "n2-standard-8"), + ), + }, + //computeInstanceImportStep("us-central1-a", instanceName2, []string{"desired_status","allow_stopping_for_update"}), + }, + }) +} + +func testAccComputeInstance_bootAndAttachedDisk_interface(instanceName, diskName, zone, machineType, bootDiskInterface string, allowStoppingForUpdate bool) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2204-lts" + project = "ubuntu-os-cloud" +} + +data "google_project" "project" {} + +resource "google_compute_disk" "foorbarattach" { + name = "%s" + size = 100 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type= "%s" + zone = "%s" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + type = "pd-balanced" + size = 500 + } + interface = "%s" + + } + + attached_disk { + source = google_compute_disk.foorbarattach.self_link + } + + network_interface { + network = "default" + } + allow_stopping_for_update = %t + desired_status = "RUNNING" + +} +`, diskName, instanceName, machineType, zone, bootDiskInterface, allowStoppingForUpdate) +} From bba168c53a1cd6c79cc2799da4a9878ca97778fb Mon Sep 17 00:00:00 2001 From: Will Yardley Date: Thu, 29 Aug 2024 15:06:25 -0700 Subject: [PATCH 04/60] container: Add `node_kubelet_config` support for autopilot clusters (#11573) Co-authored-by: Stephen Lewis (Burrows) --- .../services/container/node_config.go.erb | 26 +++++++ .../resource_container_cluster.go.erb | 26 +++++++ .../resource_container_cluster_test.go.erb | 77 +++++++++++++++++++ .../docs/r/container_cluster.html.markdown | 11 ++- 4 files changed, 138 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index 14ec2f469a44..fd9feb10e467 100644 --- 
a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -781,6 +781,22 @@ func schemaNodeConfig() *schema.Schema { } } +// Separate since this currently only supports a single value -- a subset of +// the overall NodeKubeletConfig +func schemaNodePoolAutoConfigNodeKubeletConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Node kubelet configs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), + }, + }, + } +} + func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { configs := configured.([]interface{}) if len(configs) == 0 || configs[0] == nil { @@ -1752,6 +1768,16 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface return result } +func flattenNodePoolAutoConfigNodeKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c), + }) + } + return result +} + func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface{} { result := []map[string]interface{}{} if c != nil { diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index 9a2ce54a0d49..1957be85b5b2 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -1505,6 +1505,7 @@ func ResourceContainerCluster() *schema.Resource { Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and 
node auto-provisioning enabled clusters.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "node_kubelet_config": schemaNodePoolAutoConfigNodeKubeletConfig(), "network_tags": { Type: schema.TypeList, Optional: true, @@ -4403,6 +4404,24 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_auto_config.0.node_kubelet_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigKubeletConfig: expandKubeletConfig( + d.Get("node_pool_auto_config.0.node_kubelet_config"), + ), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node pool auto config node_kubelet_config parameters") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool auto config node_kubelet_config parameters have been updated", d.Id()) + } + if d.HasChange("node_pool_auto_config.0.network_tags.0.tags") { tags := d.Get("node_pool_auto_config.0.network_tags.0.tags").([]interface{}) @@ -5737,6 +5756,10 @@ func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoCon npac := &container.NodePoolAutoConfig{} config := l[0].(map[string]interface{}) + if v, ok := config["node_kubelet_config"]; ok { + npac.NodeKubeletConfig = expandKubeletConfig(v) + } + if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 { npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v) } @@ -6575,6 +6598,9 @@ func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]int } result := make(map[string]interface{}) + if c.NodeKubeletConfig != nil { + result["node_kubelet_config"] = flattenNodePoolAutoConfigNodeKubeletConfig(c.NodeKubeletConfig) + } if c.NetworkTags != nil { result["network_tags"] = flattenNodePoolAutoConfigNetworkTags(c.NetworkTags) } diff --git 
a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index b5621ab74e7c..29e6d6f4d4fb 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -3248,6 +3248,52 @@ func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) { }) } +func TestAccContainerCluster_withAutopilotKubeletConfig(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilotKubeletConfigBaseline(clusterName), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotKubeletConfigUpdates(clusterName, "FALSE"), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotKubeletConfigUpdates(clusterName, "TRUE"), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + + func 
TestAccContainerCluster_withAutopilotResourceManagerTags(t *testing.T) { t.Parallel() @@ -10517,6 +10563,37 @@ func testAccContainerCluster_withWorkloadALTSConfigAutopilot(projectID, name str <% end -%> +func testAccContainerCluster_withAutopilotKubeletConfigBaseline(name string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "with_autopilot_kubelet_config" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + enable_autopilot = true + deletion_protection = false + } +`, name) +} + +func testAccContainerCluster_withAutopilotKubeletConfigUpdates(name, insecureKubeletReadonlyPortEnabled string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "with_autopilot_kubelet_config" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + node_pool_auto_config { + node_kubelet_config { + insecure_kubelet_readonly_port_enabled = "%s" + } + } + + enable_autopilot = true + deletion_protection = false + } +`, name, insecureKubeletReadonlyPortEnabled) +} + func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { return fmt.Sprintf(` data "google_project" "project" { diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index c395f44b87f3..a6e83f1529c5 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -1084,11 +1084,18 @@ workload_identity_config { The `node_pool_auto_config` block supports: +* `node_kubelet_config` - (Optional) Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. +Structure is [documented below](#nested_node_kubelet_config). 
+ * `resource_manager_tags` - (Optional) A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. -* `network_tags` (Optional) - The network tag config for the cluster's automatically provisioned node pools. +* `network_tags` (Optional) - The network tag config for the cluster's automatically provisioned node pools. Structure is [documented below](#nested_network_tags). + +The `node_kubelet_config` block supports: + +* `insecure_kubelet_readonly_port_enabled` - (Optional) Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. -The `network_tags` block supports: +The `network_tags` block supports: * `tags` (Optional) - List of network tags applied to auto-provisioned node pools. 
From d87891e2492f2dcc5a666e6e99e4917eab882761 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:00:46 -0700 Subject: [PATCH 05/60] Made tgc test data change trigger tgc test in CI (#11584) Co-authored-by: Stephen Lewis (Burrows) --- .github/workflows/test-tgc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-tgc.yml b/.github/workflows/test-tgc.yml index 91274728e35a..e302188b99a0 100644 --- a/.github/workflows/test-tgc.yml +++ b/.github/workflows/test-tgc.yml @@ -67,7 +67,7 @@ jobs: - name: Check for Code Changes id: pull_request run: | - gofiles=$(git diff --name-only HEAD~1 | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" || test $? = 1; }) + gofiles=$(git diff --name-only HEAD~1 | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" -e "tfplan2cai/testdata/" || test $? = 1; }) if [ -z "$gofiles" ]; then echo "has_changes=false" >> $GITHUB_OUTPUT else From 1b55ddfc9c700a45cc366b3262aa10fb3744cbb3 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Fri, 30 Aug 2024 10:30:38 -0700 Subject: [PATCH 06/60] Fix post-6.0.0 TGC issues (#11581) --- .../tgc/tests/data/example_alloydb_instance.json | 10 +++++++++- .../example_artifact_registry_repository.json | 3 +++ ...ery_dataset_iam_policy_empty_policy_data.json | 3 +++ .../tests/data/example_bigtable_instance.json | 1 - .../tests/data/example_cloud_run_mapping.json | 6 ++++++ .../tests/data/example_cloud_run_service.json | 3 +++ .../tgc/tests/data/example_cloud_run_v2_job.json | 3 +++ .../tgc/tests/data/example_compute_address.json | 6 +++--- .../data/example_compute_global_address.json | 4 +++- .../tgc/tests/data/example_compute_snapshot.json | 3 +++ .../tests/data/example_compute_vpn_tunnel.json | 16 +++++++++++++++- .../tgc/tests/data/example_dns_managed_zone.json | 3 +++ .../tests/data/example_filestore_instance.json | 3 +++ .../tgc/tests/data/example_gke_hub_feature.json | 3 ++- 
.../example_google_cloudfunctions_function.json | 1 - .../data/example_google_dataproc_cluster.json | 3 +-- ...ple_google_datastream_connection_profile.json | 3 +++ .../data/example_google_datastream_stream.json | 14 +++++++++++++- ...ple_google_datastream_stream_append_only.json | 14 +++++++++++++- .../tgc/tests/data/example_kms_crypto_key.json | 3 +++ .../data/example_kms_crypto_key_iam_binding.json | 3 +++ .../data/example_kms_crypto_key_iam_member.json | 3 +++ .../data/example_kms_crypto_key_iam_policy.json | 3 +++ .../example_monitoring_notification_channel.json | 3 +-- .../tgc/tests/data/example_project_create.json | 1 - .../example_project_create_empty_project_id.json | 1 - ...empty_project_id_without_default_project.json | 1 - .../tgc/tests/data/example_project_update.json | 1 - .../tgc/tests/data/example_spanner_database.json | 3 +++ .../example_spanner_database_iam_binding.json | 3 +++ .../example_spanner_database_iam_member.json | 3 +++ .../example_spanner_database_iam_policy.json | 3 +++ .../example_spanner_instance_iam_binding.json | 3 +++ .../example_spanner_instance_iam_member.json | 3 +++ .../example_spanner_instance_iam_policy.json | 3 +++ .../tgc/tests/data/full_compute_instance.json | 1 - .../tgc/tests/data/full_container_cluster.json | 1 - .../tgc/tests/data/full_container_node_pool.json | 1 - .../tests/data/full_sql_database_instance.json | 3 +++ .../tgc/tests/data/full_storage_bucket.json | 1 - 40 files changed, 126 insertions(+), 23 deletions(-) diff --git a/mmv1/third_party/tgc/tests/data/example_alloydb_instance.json b/mmv1/third_party/tgc/tests/data/example_alloydb_instance.json index 3cb342f850e4..313db786524e 100644 --- a/mmv1/third_party/tgc/tests/data/example_alloydb_instance.json +++ b/mmv1/third_party/tgc/tests/data/example_alloydb_instance.json @@ -9,6 +9,9 @@ "parent":"//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data":{ "instanceType":"PRIMARY", + "labels": { + "goog-terraform-provisioned": "true" + }, 
"machineConfig":{ "cpuCount":2 } @@ -32,7 +35,12 @@ "initialUser":{ "password":"alloydb-cluster" }, - "network":"default" + "labels": { + "goog-terraform-provisioned": "true" + }, + "networkConfig": { + "network":"default" + } } }, "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", diff --git a/mmv1/third_party/tgc/tests/data/example_artifact_registry_repository.json b/mmv1/third_party/tgc/tests/data/example_artifact_registry_repository.json index 8c2596d216a3..7c903a5cc3da 100644 --- a/mmv1/third_party/tgc/tests/data/example_artifact_registry_repository.json +++ b/mmv1/third_party/tgc/tests/data/example_artifact_registry_repository.json @@ -11,6 +11,9 @@ "data": { "description": "example docker repository", "format": "DOCKER", + "labels": { + "goog-terraform-provisioned": "true" + }, "mode": "STANDARD_REPOSITORY" } }, diff --git a/mmv1/third_party/tgc/tests/data/example_bigquery_dataset_iam_policy_empty_policy_data.json b/mmv1/third_party/tgc/tests/data/example_bigquery_dataset_iam_policy_empty_policy_data.json index f1a5ec56ace2..c985419ed3a7 100644 --- a/mmv1/third_party/tgc/tests/data/example_bigquery_dataset_iam_policy_empty_policy_data.json +++ b/mmv1/third_party/tgc/tests/data/example_bigquery_dataset_iam_policy_empty_policy_data.json @@ -12,6 +12,9 @@ "datasetReference": { "datasetId": "example_dataset" }, + "labels": { + "goog-terraform-provisioned": "true" + }, "location": "US", "friendlyName": "" } diff --git a/mmv1/third_party/tgc/tests/data/example_bigtable_instance.json b/mmv1/third_party/tgc/tests/data/example_bigtable_instance.json index 3dfe0db72c26..33dcca6a3d72 100644 --- a/mmv1/third_party/tgc/tests/data/example_bigtable_instance.json +++ b/mmv1/third_party/tgc/tests/data/example_bigtable_instance.json @@ -10,7 +10,6 @@ "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { "labels": { - "goog-terraform-provisioned": "true", "test-name": "test-value" }, "name": 
"projects/{{.Provider.project}}/instances/tf-instance" diff --git a/mmv1/third_party/tgc/tests/data/example_cloud_run_mapping.json b/mmv1/third_party/tgc/tests/data/example_cloud_run_mapping.json index b0ee7a454179..279d350f61d1 100644 --- a/mmv1/third_party/tgc/tests/data/example_cloud_run_mapping.json +++ b/mmv1/third_party/tgc/tests/data/example_cloud_run_mapping.json @@ -12,6 +12,9 @@ "apiVersion": "domains.cloudrun.com/v1", "kind": "DomainMapping", "metadata": { + "labels": { + "goog-terraform-provisioned": "true" + }, "name": "tf-test-domain-meep.gcp.tfacc.hashicorptest.com", "namespace": "{{.Provider.project}}" }, @@ -36,6 +39,9 @@ "kind": "Service", "metadata": { "name": "tf-test-cloudrun-srv-beep", + "labels": { + "goog-terraform-provisioned": "true" + }, "namespace": "{{.Provider.project}}" }, "spec": { diff --git a/mmv1/third_party/tgc/tests/data/example_cloud_run_service.json b/mmv1/third_party/tgc/tests/data/example_cloud_run_service.json index 70c5a7a0e44f..8730d5045cee 100644 --- a/mmv1/third_party/tgc/tests/data/example_cloud_run_service.json +++ b/mmv1/third_party/tgc/tests/data/example_cloud_run_service.json @@ -15,6 +15,9 @@ "annotations": { "generated-by": "magic-modules" }, + "labels": { + "goog-terraform-provisioned": "true" + }, "name": "cloudrun-to-get-cai", "namespace": "{{.Provider.project}}" }, diff --git a/mmv1/third_party/tgc/tests/data/example_cloud_run_v2_job.json b/mmv1/third_party/tgc/tests/data/example_cloud_run_v2_job.json index 394248652bbd..e49031623723 100644 --- a/mmv1/third_party/tgc/tests/data/example_cloud_run_v2_job.json +++ b/mmv1/third_party/tgc/tests/data/example_cloud_run_v2_job.json @@ -8,6 +8,9 @@ "discovery_name":"Job", "parent":"//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data":{ + "labels": { + "goog-terraform-provisioned": "true" + }, "template":{ "template":{ "containers":[ diff --git a/mmv1/third_party/tgc/tests/data/example_compute_address.json 
b/mmv1/third_party/tgc/tests/data/example_compute_address.json index 73d9e8898602..56b2d7322449 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_address.json +++ b/mmv1/third_party/tgc/tests/data/example_compute_address.json @@ -28,6 +28,9 @@ "address": "10.0.42.42", "addressType": "INTERNAL", "name": "my-internal-address", + "labels": { + "goog-terraform-provisioned": "true" + }, "region": "projects/{{.Provider.project}}/global/regions/us-central1" } } @@ -46,9 +49,6 @@ "logConfig": { "enable": false }, - "labels": { - "goog-terraform-provisioned": "true" - }, "name": "my-subnet", "region": "projects/{{.Provider.project}}/global/regions/us-central1" } diff --git a/mmv1/third_party/tgc/tests/data/example_compute_global_address.json b/mmv1/third_party/tgc/tests/data/example_compute_global_address.json index 17cf91b3c46d..1331c6418736 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_global_address.json +++ b/mmv1/third_party/tgc/tests/data/example_compute_global_address.json @@ -10,7 +10,9 @@ "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { "addressType": "EXTERNAL", - "goog-terraform-provisioned": "true", + "labels": { + "goog-terraform-provisioned": "true" + }, "name": "global-appserver-ip" } } diff --git a/mmv1/third_party/tgc/tests/data/example_compute_snapshot.json b/mmv1/third_party/tgc/tests/data/example_compute_snapshot.json index d35246332951..e3cb28eba864 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_snapshot.json +++ b/mmv1/third_party/tgc/tests/data/example_compute_snapshot.json @@ -33,6 +33,9 @@ "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { "name": "debian-disk", + "labels": { + "goog-terraform-provisioned": "true" + }, "sizeGb": 10, "sourceImage": "projects/debian-cloud/global/images/debian-8-jessie-v20170523", "type": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/pd-ssd", diff --git 
a/mmv1/third_party/tgc/tests/data/example_compute_vpn_tunnel.json b/mmv1/third_party/tgc/tests/data/example_compute_vpn_tunnel.json index f35141fdef9b..b919ce6bab10 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_vpn_tunnel.json +++ b/mmv1/third_party/tgc/tests/data/example_compute_vpn_tunnel.json @@ -1,5 +1,16 @@ [ - "foo": "bar", + { + "name":"//compute.googleapis.com/projects/{{.Provider.project}}/regions/us-central1/vpnTunnels/tunnel-1", + "asset_type":"compute.googleapis.com/VpnTunnel", + "resource":{ + "version":"v1", + "discovery_document_uri":"https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name":"VpnTunnel", + "parent":"//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data":{ + "ikeVersion":2, + "labels":{ + "foo":"bar", "goog-terraform-provisioned": "true" }, "name":"tunnel-1", @@ -61,6 +72,9 @@ "data":{ "addressType":"EXTERNAL", "name":"vpn-static-ip", + "labels": { + "goog-terraform-provisioned": "true" + }, "region":"projects/{{.Provider.project}}/global/regions/us-central1" } }, diff --git a/mmv1/third_party/tgc/tests/data/example_dns_managed_zone.json b/mmv1/third_party/tgc/tests/data/example_dns_managed_zone.json index 5fee4143e1c9..b165a75bf934 100644 --- a/mmv1/third_party/tgc/tests/data/example_dns_managed_zone.json +++ b/mmv1/third_party/tgc/tests/data/example_dns_managed_zone.json @@ -24,6 +24,9 @@ "nonExistence": "nsec3", "state": "on" }, + "labels": { + "goog-terraform-provisioned": "true" + }, "name": "publiczone", "privateVisibilityConfig": { "networks": [] diff --git a/mmv1/third_party/tgc/tests/data/example_filestore_instance.json b/mmv1/third_party/tgc/tests/data/example_filestore_instance.json index c7b29f655583..1ead2163484b 100644 --- a/mmv1/third_party/tgc/tests/data/example_filestore_instance.json +++ b/mmv1/third_party/tgc/tests/data/example_filestore_instance.json @@ -15,6 +15,9 @@ "name": "share1" } ], + "labels": { + "goog-terraform-provisioned": "true" + }, 
"networks": [ { "connectMode": "DIRECT_PEERING", diff --git a/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json b/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json index fc13dafd911b..32a8c6e08cd4 100644 --- a/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json +++ b/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json @@ -11,7 +11,8 @@ "data": { "fleetDefaultMemberConfig": null, "labels": { - "foo": "bar" + "foo": "bar", + "goog-terraform-provisioned": "true" } } }, diff --git a/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json b/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json index e0ae59aef239..8965a7c1a46b 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json +++ b/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json @@ -15,7 +15,6 @@ "MY_CF_ENV": "my-cf-env" }, "labels": { - "goog-terraform-provisioned": "true", "my-cf-label-value": "my-cf-label-value" }, "location": "us-east1", diff --git a/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json b/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json index eea13a44173b..11f8739c4748 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json +++ b/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json @@ -57,8 +57,7 @@ } }, "labels": { - "foo": "bar", - "goog-terraform-provisioned": "true" + "foo": "bar" }, "projectId": "{{.Provider.project}}" } diff --git a/mmv1/third_party/tgc/tests/data/example_google_datastream_connection_profile.json b/mmv1/third_party/tgc/tests/data/example_google_datastream_connection_profile.json index 56e862f846b0..cd76928e5c2b 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_datastream_connection_profile.json +++ b/mmv1/third_party/tgc/tests/data/example_google_datastream_connection_profile.json @@ -11,6 +11,9 @@ "data": { "bigqueryProfile": null, 
"displayName": "Connection profile", + "labels": { + "goog-terraform-provisioned": "true" + }, "gcsProfile": { "bucket": "my-bucket", "rootPath": "/path" diff --git a/mmv1/third_party/tgc/tests/data/example_google_datastream_stream.json b/mmv1/third_party/tgc/tests/data/example_google_datastream_stream.json index 598a664e33b1..f70e4af240c2 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_datastream_stream.json +++ b/mmv1/third_party/tgc/tests/data/example_google_datastream_stream.json @@ -13,6 +13,9 @@ }, "description": "Database of postgres", "friendlyName": "stpostgres", + "labels": { + "goog-terraform-provisioned": "true" + }, "location": "us-central1" } }, @@ -80,7 +83,10 @@ "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { "bigqueryProfile": {}, - "displayName": "Connection profile" + "displayName": "Connection profile", + "labels": { + "goog-terraform-provisioned": "true" + } } }, "ancestors": ["organizations/{{.OrgID}}"], @@ -97,6 +103,9 @@ "data": { "bigqueryProfile": null, "displayName": "Source connection profile", + "labels": { + "goog-terraform-provisioned": "true" + }, "mysqlProfile": { "port": 3306, "username": "my-user" @@ -125,6 +134,9 @@ } }, "displayName": "postgres to bigQuery", + "labels": { + "goog-terraform-provisioned": "true" + }, "sourceConfig": { "mysqlSourceConfig": { "maxConcurrentBackfillTasks": 0, diff --git a/mmv1/third_party/tgc/tests/data/example_google_datastream_stream_append_only.json b/mmv1/third_party/tgc/tests/data/example_google_datastream_stream_append_only.json index bced423d4fde..9e575395481d 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_datastream_stream_append_only.json +++ b/mmv1/third_party/tgc/tests/data/example_google_datastream_stream_append_only.json @@ -13,6 +13,9 @@ }, "description": "Database of postgres", "friendlyName": "stpostgres", + "labels": { + "goog-terraform-provisioned": "true" + }, "location": "us-central1" } }, @@ -80,7 +83,10 @@ 
"parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { "bigqueryProfile": {}, - "displayName": "Connection profile" + "displayName": "Connection profile", + "labels": { + "goog-terraform-provisioned": "true" + } } }, "ancestors": ["organizations/{{.OrgID}}"], @@ -97,6 +103,9 @@ "data": { "bigqueryProfile": null, "displayName": "Source connection profile", + "labels": { + "goog-terraform-provisioned": "true" + }, "mysqlProfile": { "port": 3306, "username": "my-user" @@ -125,6 +134,9 @@ } }, "displayName": "postgres to bigQuery", + "labels": { + "goog-terraform-provisioned": "true" + }, "sourceConfig": { "mysqlSourceConfig": { "maxConcurrentBackfillTasks": 0, diff --git a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key.json b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key.json index 89120584aaf3..d06ae073eedc 100644 --- a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key.json +++ b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key.json @@ -9,6 +9,9 @@ "discovery_name": "CryptoKey", "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { + "labels": { + "goog-terraform-provisioned": "true" + }, "purpose": "ENCRYPT_DECRYPT" } } diff --git a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_binding.json b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_binding.json index cd765e5bc893..9507ba8d939e 100644 --- a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_binding.json +++ b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_binding.json @@ -9,6 +9,9 @@ "discovery_name": "CryptoKey", "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { + "labels": { + "goog-terraform-provisioned": "true" + }, "purpose": "ENCRYPT_DECRYPT" } }, diff --git a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_member.json b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_member.json index 
f743cc71d904..1194a09bb9ba 100644 --- a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_member.json +++ b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_member.json @@ -9,6 +9,9 @@ "discovery_name": "CryptoKey", "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { + "labels": { + "goog-terraform-provisioned": "true" + }, "purpose": "ENCRYPT_DECRYPT" } }, diff --git a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_policy.json b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_policy.json index 64c09c1bf1bd..5723bd1aa6ad 100644 --- a/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_policy.json +++ b/mmv1/third_party/tgc/tests/data/example_kms_crypto_key_iam_policy.json @@ -9,6 +9,9 @@ "discovery_name": "CryptoKey", "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { + "labels": { + "goog-terraform-provisioned": "true" + }, "purpose": "ENCRYPT_DECRYPT" } }, diff --git a/mmv1/third_party/tgc/tests/data/example_monitoring_notification_channel.json b/mmv1/third_party/tgc/tests/data/example_monitoring_notification_channel.json index 041873ba4bba..232e143ece4a 100644 --- a/mmv1/third_party/tgc/tests/data/example_monitoring_notification_channel.json +++ b/mmv1/third_party/tgc/tests/data/example_monitoring_notification_channel.json @@ -12,8 +12,7 @@ "displayName": "monitoring notification channel", "enabled": true, "labels": { - "email_address": "foo@bar.com", - "goog-terraform-provisioned": "true" + "email_address": "foo@bar.com" }, "type": "email" } diff --git a/mmv1/third_party/tgc/tests/data/example_project_create.json b/mmv1/third_party/tgc/tests/data/example_project_create.json index 0af0da564c6f..6fee6849b8bf 100644 --- a/mmv1/third_party/tgc/tests/data/example_project_create.json +++ b/mmv1/third_party/tgc/tests/data/example_project_create.json @@ -27,7 +27,6 @@ "data": { "name": "My Project", "labels": { - 
"goog-terraform-provisioned": "true", "project-label-key-a": "project-label-val-a" }, "projectId": "{{.Provider.project}}" diff --git a/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id.json b/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id.json index 1ce3e093d429..2542819d2d72 100644 --- a/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id.json +++ b/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id.json @@ -26,7 +26,6 @@ "data": { "name": "My Project", "labels": { - "goog-terraform-provisioned": "true", "project-label-key-a": "project-label-val-a" } } diff --git a/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id_without_default_project.json b/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id_without_default_project.json index aa282c12d975..066d2865d3e4 100644 --- a/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id_without_default_project.json +++ b/mmv1/third_party/tgc/tests/data/example_project_create_empty_project_id_without_default_project.json @@ -24,7 +24,6 @@ "parent": "//cloudresourcemanager.googleapis.com/organizations/unknown", "data": { "labels": { - "goog-terraform-provisioned": "true", "project-label-key-a": "project-label-val-a" }, "name": "My Project" diff --git a/mmv1/third_party/tgc/tests/data/example_project_update.json b/mmv1/third_party/tgc/tests/data/example_project_update.json index 003a72b3e919..0ca39ff9d442 100644 --- a/mmv1/third_party/tgc/tests/data/example_project_update.json +++ b/mmv1/third_party/tgc/tests/data/example_project_update.json @@ -26,7 +26,6 @@ "parent": "//cloudresourcemanager.googleapis.com/organizations/12345", "data": { "labels": { - "goog-terraform-provisioned": "true", "project-label-key-a": "project-label-val-a" }, "name": "My New Project", diff --git a/mmv1/third_party/tgc/tests/data/example_spanner_database.json 
b/mmv1/third_party/tgc/tests/data/example_spanner_database.json index a65dc8e8538d..923a24bcf7b5 100644 --- a/mmv1/third_party/tgc/tests/data/example_spanner_database.json +++ b/mmv1/third_party/tgc/tests/data/example_spanner_database.json @@ -30,6 +30,9 @@ "instance": { "config": "projects/{{.Provider.project}}/instanceConfigs/regional-europe-west1", "displayName": "main-instance", + "labels": { + "goog-terraform-provisioned": "true" + }, "nodeCount": 1 }, "instanceId": "" diff --git a/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_binding.json b/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_binding.json index c299e1cf3fd2..c61d0194834b 100644 --- a/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_binding.json +++ b/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_binding.json @@ -12,6 +12,9 @@ "instance": { "config": "projects/{{.Provider.project}}/instanceConfigs/regional-europe-west1", "displayName": "main-instance", + "labels": { + "goog-terraform-provisioned": "true" + }, "nodeCount": 1 }, "instanceId": "my-instance" diff --git a/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_member.json b/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_member.json index c299e1cf3fd2..c61d0194834b 100644 --- a/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_member.json +++ b/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_member.json @@ -12,6 +12,9 @@ "instance": { "config": "projects/{{.Provider.project}}/instanceConfigs/regional-europe-west1", "displayName": "main-instance", + "labels": { + "goog-terraform-provisioned": "true" + }, "nodeCount": 1 }, "instanceId": "my-instance" diff --git a/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_policy.json b/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_policy.json index 36751e8e7bfe..112bd99eabf0 100644 --- a/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_policy.json +++ 
b/mmv1/third_party/tgc/tests/data/example_spanner_database_iam_policy.json @@ -12,6 +12,9 @@ "instance": { "config": "projects/{{.Provider.project}}/instanceConfigs/regional-europe-west1", "displayName": "main-instance", + "labels": { + "goog-terraform-provisioned": "true" + }, "nodeCount": 1 }, "instanceId": "my-instance" diff --git a/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_binding.json b/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_binding.json index 73a1d166181f..afd193846393 100644 --- a/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_binding.json +++ b/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_binding.json @@ -12,6 +12,9 @@ "instance": { "config": "projects/{{.Provider.project}}/instanceConfigs/regional-us-central1", "displayName": "spanner-instance", + "labels": { + "goog-terraform-provisioned": "true" + }, "nodeCount": 1 }, "instanceId": "spanner-instance" diff --git a/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_member.json b/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_member.json index 73a1d166181f..afd193846393 100644 --- a/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_member.json +++ b/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_member.json @@ -12,6 +12,9 @@ "instance": { "config": "projects/{{.Provider.project}}/instanceConfigs/regional-us-central1", "displayName": "spanner-instance", + "labels": { + "goog-terraform-provisioned": "true" + }, "nodeCount": 1 }, "instanceId": "spanner-instance" diff --git a/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_policy.json b/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_policy.json index 2647e1bd1c6b..e51a9bb88d7a 100644 --- a/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_policy.json +++ b/mmv1/third_party/tgc/tests/data/example_spanner_instance_iam_policy.json @@ -12,6 +12,9 @@ "instance": { "config": 
"projects/{{.Provider.project}}/instanceConfigs/regional-us-central1", "displayName": "spanner-instance", + "labels": { + "goog-terraform-provisioned": "true" + }, "nodeCount": 1 }, "instanceId": "spanner-instance" diff --git a/mmv1/third_party/tgc/tests/data/full_compute_instance.json b/mmv1/third_party/tgc/tests/data/full_compute_instance.json index 28abeae8a571..293347a55e57 100644 --- a/mmv1/third_party/tgc/tests/data/full_compute_instance.json +++ b/mmv1/third_party/tgc/tests/data/full_compute_instance.json @@ -74,7 +74,6 @@ ], "hostname": "test-hostname", "labels": { - "goog-terraform-provisioned": "true", "label_foo1": "label-bar1" }, "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", diff --git a/mmv1/third_party/tgc/tests/data/full_container_cluster.json b/mmv1/third_party/tgc/tests/data/full_container_cluster.json index 7d9c4ec09ce6..ccae834748ac 100644 --- a/mmv1/third_party/tgc/tests/data/full_container_cluster.json +++ b/mmv1/third_party/tgc/tests/data/full_container_cluster.json @@ -79,7 +79,6 @@ "diskType": "pd-standard", "imageType": "test-image_type", "labels": { - "goog-terraform-provisioned": "true", "test-label-key": "test-label-value" }, "localSsdCount": 42, diff --git a/mmv1/third_party/tgc/tests/data/full_container_node_pool.json b/mmv1/third_party/tgc/tests/data/full_container_node_pool.json index 1a35f3548de1..a9bb79031e0e 100644 --- a/mmv1/third_party/tgc/tests/data/full_container_node_pool.json +++ b/mmv1/third_party/tgc/tests/data/full_container_node_pool.json @@ -29,7 +29,6 @@ "diskType": "pd-standard", "imageType": "test-image_type", "labels": { - "goog-terraform-provisioned": "true", "test-label-key": "test-label-value" }, "localSsdCount": 42, diff --git a/mmv1/third_party/tgc/tests/data/full_sql_database_instance.json b/mmv1/third_party/tgc/tests/data/full_sql_database_instance.json index 34f0e15b9243..ce26a729029e 100644 --- 
a/mmv1/third_party/tgc/tests/data/full_sql_database_instance.json +++ b/mmv1/third_party/tgc/tests/data/full_sql_database_instance.json @@ -98,6 +98,9 @@ "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { "addressType": "INTERNAL", + "labels": { + "goog-terraform-provisioned": "true" + }, "name": "private-ip-address", "prefixLength": 16, "purpose": "VPC_PEERING" diff --git a/mmv1/third_party/tgc/tests/data/full_storage_bucket.json b/mmv1/third_party/tgc/tests/data/full_storage_bucket.json index 598f472ae705..7a25c4dfec85 100644 --- a/mmv1/third_party/tgc/tests/data/full_storage_bucket.json +++ b/mmv1/third_party/tgc/tests/data/full_storage_bucket.json @@ -53,7 +53,6 @@ } }, "labels": { - "goog-terraform-provisioned": "true", "label_foo1": "label-bar1" }, "lifecycle": { From 68282ea6e38b34150888bdff9bae1a27c73bb7f9 Mon Sep 17 00:00:00 2001 From: vijaykanthm Date: Fri, 30 Aug 2024 11:06:26 -0700 Subject: [PATCH 07/60] Add Resource V2 SCC Findings Export to Big Query Folder Config (#11517) Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com> --- .../FolderSccBigQueryExports.yaml | 147 ++++++++++++++++ ...older_big_query_export_config_basic.tf.erb | 32 ++++ ..._v2_folder_big_query_export_config_test.go | 157 ++++++++++++++++++ 3 files changed, 336 insertions(+) create mode 100644 mmv1/products/securitycenterv2/FolderSccBigQueryExports.yaml create mode 100644 mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go diff --git a/mmv1/products/securitycenterv2/FolderSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/FolderSccBigQueryExports.yaml new file mode 100644 index 000000000000..98813ed865d0 --- /dev/null +++ b/mmv1/products/securitycenterv2/FolderSccBigQueryExports.yaml @@ -0,0 +1,147 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'FolderSccBigQueryExports' +base_url: folders/{{folder}}/locations/{{location}}/bigQueryExports +self_link: folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} +create_url: folders/{{folder}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}} +update_verb: :PATCH +update_mask: true +import_format: + - 'folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +description: | + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'scc_v2_folder_big_query_export_config_basic' + primary_resource_id: 'custom_big_query_export_config' + external_providers: ["random", "time"] + skip_test: true + vars: + big_query_export_id: 'my-export' + dataset: 'my-dataset' + dataset_id: 'my_dataset_id' + name: 'my-export' + folder_display_name: "folder-name" + test_env_vars: + org_id: :ORG_ID + project: :PROJECT_NAME + +parameters: + - !ruby/object:Api::Type::String + name: folder + required: true + immutable: true + url_param_only: true + description: | + The folder where Cloud Security Command Center Big Query Export + Config lives in. + - !ruby/object:Api::Type::String + name: bigQueryExportId + required: true + immutable: true + url_param_only: true + description: | + This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + - !ruby/object:Api::Type::String + name: location + immutable: true + url_param_only: true + default_value: global + description: | + The BigQuery export configuration is stored in this location. If not provided, Use global as default. +properties: + - !ruby/object:Api::Type::String + name: name + output: true + description: | + The resource name of this export, in the format + `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. 
+ - !ruby/object:Api::Type::String + name: description + description: | + The description of the notification config (max of 1024 characters). + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.StringLenBetween(0, 1024)' + - !ruby/object:Api::Type::String + name: dataset + description: | + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + - !ruby/object:Api::Type::String + name: createTime + output: true + description: | + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::String + name: updateTime + output: true + description: | + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::String + name: mostRecentEditor + output: true + description: | + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + - !ruby/object:Api::Type::String + name: principal + output: true + description: | + The service account that needs permission to create table and upload data to the BigQuery dataset. + - !ruby/object:Api::Type::String + name: filter + description: | + Expression that defines the filter to apply across create/update + events of findings. 
The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb b/mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb new file mode 100644 index 000000000000..004659847645 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb @@ -0,0 +1,32 @@ +resource "google_folder" "folder" { + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + display_name = "<%= ctx[:vars]['folder_display_name'] %>" + + deletion_protection = false +} + +resource "google_bigquery_dataset" "default" { + dataset_id = "<%= ctx[:vars]['dataset_id'] %>" + friendly_name = "test" + description = "This is a test description" + location = "US" + default_table_expiration_ms = 3600000 + default_partition_expiration_ms = null + + labels = { + env = "default" + } + + lifecycle { + ignore_changes = [default_partition_expiration_ms] + } +} + +resource "google_scc_v2_folder_scc_big_query_exports" "<%= ctx[:primary_resource_id] %>" { + big_query_export_id = "<%= ctx[:vars]['big_query_export_id'] %>" + folder = google_folder.folder.folder_id + dataset = google_bigquery_dataset.default.id + 
location = "global" + description = "Cloud Security Command Center Findings Big Query Export Config" + filter = "state=\"ACTIVE\" AND NOT mute=\"MUTED\"" +} diff --git a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go new file mode 100644 index 000000000000..70ec6bb4ee85 --- /dev/null +++ b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go @@ -0,0 +1,157 @@ +package securitycenterv2_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccSecurityCenterV2FolderBigQueryExportConfig_basic(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + dataset_id := "tf_test_" + randomSuffix + orgID := envvar.GetTestOrgFromEnv(t) + + context := map[string]interface{}{ + "org_id": orgID, + "random_suffix": randomSuffix, + "dataset_id": dataset_id, + "big_query_export_id": "tf-test-export-" + randomSuffix, + "folder_name": "tf-test-folder-name-" + randomSuffix, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterV2FolderBigQueryExportConfig_basic(context), + }, + { + ResourceName: "google_scc_v2_folder_scc_big_query_exports.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"update_time"}, + }, + { + Config: testAccSecurityCenterV2FolderBigQueryExportConfig_update(context), + }, + { + ResourceName: 
"google_scc_v2_folder_scc_big_query_exports.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"update_time"}, + }, + }, + }) +} + +func testAccSecurityCenterV2FolderBigQueryExportConfig_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "%{folder_name}" + + deletion_protection = false +} + +resource "google_bigquery_dataset" "default" { + dataset_id = "%{dataset_id}" + friendly_name = "test" + description = "This is a test description" + location = "US" + default_table_expiration_ms = 3600000 + default_partition_expiration_ms = null + + labels = { + env = "default" + } + + lifecycle { + ignore_changes = [default_partition_expiration_ms] + } +} + +resource "time_sleep" "wait_1_minute" { + depends_on = [google_bigquery_dataset.default] + create_duration = "3m" +} + +resource "google_scc_v2_folder_scc_big_query_exports" "default" { + big_query_export_id = "%{big_query_export_id}" + folder = google_folder.folder.folder_id + dataset = google_bigquery_dataset.default.id + location = "global" + description = "Cloud Security Command Center Findings Big Query Export Config" + filter = "state=\"ACTIVE\" AND NOT mute=\"MUTED\"" + + lifecycle { + ignore_changes = [name] + } + + depends_on = [time_sleep.wait_1_minute] + +} + +resource "time_sleep" "wait_for_cleanup" { + create_duration = "3m" + depends_on = [google_scc_v2_folder_scc_big_query_exports.default] +} +`, context) +} + +func testAccSecurityCenterV2FolderBigQueryExportConfig_update(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "%{folder_name}" + + deletion_protection = false +} + +resource "google_bigquery_dataset" "default" { + dataset_id = "%{dataset_id}" + friendly_name = "test" + description = "This is a test description" + location = "US" + 
default_table_expiration_ms = 3600000 + default_partition_expiration_ms = null + + labels = { + env = "default" + } + + lifecycle { + ignore_changes = [default_partition_expiration_ms] + } +} + +resource "google_scc_v2_folder_scc_big_query_exports" "default" { + big_query_export_id = "%{big_query_export_id}" + folder = google_folder.folder.folder_id + dataset = google_bigquery_dataset.default.id + location = "global" + description = "SCC Findings Big Query Export Update" + filter = "state=\"ACTIVE\" AND NOT mute=\"MUTED\"" + + lifecycle { + ignore_changes = [name] + } + +} + +resource "time_sleep" "wait_for_cleanup" { + create_duration = "3m" + depends_on = [google_scc_v2_folder_scc_big_query_exports.default] +} +`, context) +} From eee25d36162400989019fbfd23aaafbd7e864e3f Mon Sep 17 00:00:00 2001 From: Will Yardley Date: Fri, 30 Aug 2024 11:32:53 -0700 Subject: [PATCH 08/60] docs: add note about acceptance tests (#11475) --- docs/content/develop/test/run-tests.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/content/develop/test/run-tests.md b/docs/content/develop/test/run-tests.md index 0165ac976b8b..2b8d50d31803 100644 --- a/docs/content/develop/test/run-tests.md +++ b/docs/content/develop/test/run-tests.md @@ -62,7 +62,8 @@ aliases: make testacc TEST=./google/services/container TESTARGS='-run=TestAccContainerNodePool' ``` - +> [!NOTE] +> Acceptance tests create actual infrastructure which can incur costs. Acceptance tests may not clean up after themselves if interrupted, so you may want to check for stray resources and / or billing charges. 1. Optional: Save verbose test output (including API requests and responses) to a file for analysis. @@ -89,14 +90,13 @@ aliases: make lint ``` - 1. Run acceptance tests for only modified resources. (Full test runs can take over 9 hours.) See [Go's documentation](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for more information about `-run` and other flags. 
```bash make testacc TEST=./google-beta/services/container TESTARGS='-run=TestAccContainerNodePool' ``` - - +> [!NOTE] +> Acceptance tests create actual infrastructure which can incur costs. Acceptance tests may not clean up after themselves if interrupted, so you may want to check for stray resources and / or billing charges. 1. Optional: Save verbose test output to a file for analysis. @@ -238,7 +238,6 @@ Configure Terraform to use locally-built binaries for `google` and `google-beta` ### Run manual tests - 1. [Generate the provider(s) you want to test]({{< ref "/get-started/generate-providers" >}}) 2. Build the provider(s) you want to test @@ -291,6 +290,5 @@ To stop using developer overrides, stop setting `TF_CLI_CONFIG_FILE` in the comm Terraform will resume its normal behaviour of pulling published provider versions from the public Registry. Any version constraints in your Terraform configuration will come back into effect. Also, you may need to run `terraform init` to download the required version of the provider into your project directory if you haven't already. - ## What's next? 
- [Create a pull request]({{< ref "/contribute/create-pr" >}}) From ac5f4eab72e9bd08ee51e1ae699c3f18a36b6b21 Mon Sep 17 00:00:00 2001 From: haiyanmeng Date: Fri, 30 Aug 2024 14:42:54 -0400 Subject: [PATCH 09/60] Update the documentation for gkehub_feature_membership (#11594) --- .../gke_hub_feature_membership.html.markdown | 52 ++++++++++++++----- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/r/gke_hub_feature_membership.html.markdown b/mmv1/third_party/terraform/website/docs/r/gke_hub_feature_membership.html.markdown index c52858a80f5c..921655e53e0a 100644 --- a/mmv1/third_party/terraform/website/docs/r/gke_hub_feature_membership.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/gke_hub_feature_membership.html.markdown @@ -40,8 +40,9 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.6.2" + version = "1.19.0" config_sync { + enabled = true git { sync_repo = "https://github.com/hashicorp/terraform" } @@ -81,8 +82,9 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.15.1" + version = "1.19.0" config_sync { + enabled = true oci { sync_repo = "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest" policy_dir = "config-connector" @@ -174,8 +176,9 @@ resource "google_gke_hub_feature_membership" "feature_member" { membership = google_gke_hub_membership.membership.membership_id membership_location = google_gke_hub_membership.membership.location configmanagement { - version = "1.6.2" + version = "1.19.0" config_sync { + enabled = true git { sync_repo = "https://github.com/hashicorp/terraform" } @@ -306,26 +309,41 @@ The following arguments are supported: The 
`configmanagement` block supports: - -* `binauthz` - - (Optional) - Binauthz configuration for the cluster. Structure is [documented below](#nested_binauthz). - + * `config_sync` - (Optional) Config Sync configuration for the cluster. Structure is [documented below](#nested_config_sync). - + +* `management` - + (Optional) + Set this field to MANAGEMENT_AUTOMATIC to enable + [Config Sync auto-upgrades](http://cloud/kubernetes-engine/enterprise/config-sync/docs/how-to/upgrade-config-sync#auto-upgrade-config), + and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. + This field was introduced in Terraform version [5.41.0](https://github.com/hashicorp/terraform-provider-google/releases/tag/v5.41.0). + +* `version` - + (Optional) + Version of ACM installed. + +* `binauthz` - + (Optional, Deprecated) + Binauthz configuration for the cluster. Structure is [documented below](#nested_binauthz). + This field will be ignored and should not be set. + * `hierarchy_controller` - (Optional) Hierarchy Controller configuration for the cluster. Structure is [documented below](#nested_hierarchy_controller). + Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + to migrate from Hierarchy Controller to HNC. * `policy_controller` - (Optional) Policy Controller configuration for the cluster. Structure is [documented below](#nested_policy_controller). - -* `version` - - (Optional) - Version of ACM installed. + Configuring Policy Controller through the configmanagement feature is no longer recommended. + Use the policycontroller feature instead. 
+ The `binauthz` block supports: @@ -334,7 +352,13 @@ The following arguments are supported: Whether binauthz is enabled in this cluster. The `config_sync` block supports: - + +* `enabled` - + (Optional) + Whether Config Sync is enabled in the cluster. This field was introduced in Terraform version + [5.41.0](https://github.com/hashicorp/terraform-provider-google/releases/tag/v5.41.0), and + needs to be set to `true` explicitly to install Config Sync. + * `git` - (Optional) Structure is [documented below](#nested_git). From f0933ec677b26579078e7c02f6361d7fa001c902 Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Fri, 30 Aug 2024 14:56:44 -0400 Subject: [PATCH 10/60] Fix google_compute_interconnect to support the correct feature values (#11568) --- mmv1/products/compute/Interconnect.yaml | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/mmv1/products/compute/Interconnect.yaml b/mmv1/products/compute/Interconnect.yaml index ba2dd2cf47b7..809df72e6e80 100644 --- a/mmv1/products/compute/Interconnect.yaml +++ b/mmv1/products/compute/Interconnect.yaml @@ -372,16 +372,18 @@ properties: - !ruby/object:Api::Type::Array name: 'requestedFeatures' description: | - interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. 
item_type: !ruby/object:Api::Type::Enum name: 'requestedFeatures' description: | interconnects.list of features requested for this Interconnect connection values: - :MACSEC + - :IF_MACSEC - !ruby/object:Api::Type::Array name: 'availableFeatures' description: | @@ -390,9 +392,4 @@ properties: ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails). output: true - item_type: !ruby/object:Api::Type::Enum - name: 'availableFeatures' - description: | - interconnects.list of features available for this Interconnect connection, - values: - - :MACSEC + item_type: Api::Type::String From e1694376f735ab3c6c587baff0551b51a28a0904 Mon Sep 17 00:00:00 2001 From: Liyun Huang Date: Fri, 30 Aug 2024 15:50:27 -0400 Subject: [PATCH 11/60] Added backupvault resource to Terraform backupdr product (beta) (#11317) --- mmv1/products/backupdr/BackupVault.yaml | 143 ++++++++++++++++++ mmv1/products/backupdr/product.yaml | 2 +- .../backup_dr_backup_vault_full.tf.erb | 18 +++ ...esource_backup_dr_backup_vault_test.go.erb | 98 ++++++++++++ 4 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 mmv1/products/backupdr/BackupVault.yaml create mode 100644 mmv1/templates/terraform/examples/backup_dr_backup_vault_full.tf.erb create mode 100644 mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_vault_test.go.erb diff --git a/mmv1/products/backupdr/BackupVault.yaml b/mmv1/products/backupdr/BackupVault.yaml new file mode 100644 index 000000000000..253d6a3dfc6b --- /dev/null +++ b/mmv1/products/backupdr/BackupVault.yaml @@ -0,0 +1,143 @@ +--- !ruby/object:Api::Resource +base_url: projects/{{project}}/locations/{{location}}/backupVaults +create_url: projects/{{project}}/locations/{{location}}/backupVaults?backupVaultId={{backup_vault_id}} +update_url: projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}?force={{force_update}} +delete_url: 
projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}?force={{force_delete}}&allowMissing={{allow_missing}} +self_link: projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} +id_format: projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} +import_format: + - projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} +name: BackupVault +description: Container to store and organize immutable and indelible backups. +autogen_async: true +examples: + - !ruby/object:Provider::Terraform::Examples + min_version: beta + name: 'backup_dr_backup_vault_full' + primary_resource_id: 'backup-vault-test' + vars: + backup_vault_id: 'backup-vault-test' + test_env_vars: + project: :PROJECT_NAME +properties: + - !ruby/object:Api::Type::String + name: name + description: 'Output only. Identifier. The resource name. ' + output: true + - !ruby/object:Api::Type::String + name: description + description: 'Optional. The description of the BackupVault instance (2048 characters + or less). ' + - !ruby/object:Api::Type::KeyValueLabels + name: labels + description: "Optional. Resource labels to represent user provided metadata. " + - !ruby/object:Api::Type::String + name: createTime + description: 'Output only. The time when the instance was created. ' + output: true + - !ruby/object:Api::Type::String + name: updateTime + description: 'Output only. The time when the instance was updated. ' + output: true + - !ruby/object:Api::Type::String + name: backupMinimumEnforcedRetentionDuration + description: "Required. The default and minimum enforced retention for each backup + within the backup vault. The enforced retention for each backup can be extended. " + required: true + - !ruby/object:Api::Type::Boolean + name: deletable + description: 'Output only. Set to true when there are no backups nested under this + resource. 
' + output: true + - !ruby/object:Api::Type::String + name: etag + output: true + description: "Optional. Server specified ETag for the backup vault resource to prevent + simultaneous updates from overwiting each other. " + - !ruby/object:Api::Type::String + name: state + description: "Output only. The BackupVault resource instance state. \n Possible + values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR" + output: true + - !ruby/object:Api::Type::String + name: effectiveTime + description: 'Optional. Time after which the BackupVault resource is locked. ' + - !ruby/object:Api::Type::String + name: backupCount + description: 'Output only. The number of backups in this backup vault. ' + output: true + - !ruby/object:Api::Type::String + name: serviceAccount + description: "Output only. Service account used by the BackupVault Service for this + BackupVault. The user should grant this account permissions in their workload + project to enable the service to run backups and restores there. " + output: true + - !ruby/object:Api::Type::String + name: totalStoredBytes + description: 'Output only. Total size of the storage used by all backup resources. ' + output: true + - !ruby/object:Api::Type::String + name: uid + description: "Output only. Output only Immutable after resource creation until + resource deletion. " + output: true + - !ruby/object:Api::Type::KeyValueAnnotations + name: annotations + description: "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores + small amounts of arbitrary data. " +parameters: + - !ruby/object:Api::Type::String + name: location + description: "The GCP location for the backup vault. " + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: backupVaultId + description: "Required. ID of the requesting object." 
+ url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::Boolean + name: 'force_update' + default_value: false + url_param_only: true + description: | + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + - !ruby/object:Api::Type::Boolean + name: 'force_delete' + default_value: false + url_param_only: true + description: | + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + - !ruby/object:Api::Type::Boolean + name: 'allow_missing' + default_value: false + url_param_only: true + description: | + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. 
+async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: name + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: + result: !ruby/object:Api::OpAsync::Result + path: response + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: done + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: error + message: message +update_verb: :PATCH +update_mask: true diff --git a/mmv1/products/backupdr/product.yaml b/mmv1/products/backupdr/product.yaml index 2156cac27c3b..5c1c9d13da02 100644 --- a/mmv1/products/backupdr/product.yaml +++ b/mmv1/products/backupdr/product.yaml @@ -13,7 +13,7 @@ --- !ruby/object:Api::Product name: BackupDR -display_name: Backup and DR +display_name: Backup and DR Service scopes: - https://www.googleapis.com/auth/cloud-platform versions: diff --git a/mmv1/templates/terraform/examples/backup_dr_backup_vault_full.tf.erb b/mmv1/templates/terraform/examples/backup_dr_backup_vault_full.tf.erb new file mode 100644 index 000000000000..58e1aaff5519 --- /dev/null +++ b/mmv1/templates/terraform/examples/backup_dr_backup_vault_full.tf.erb @@ -0,0 +1,18 @@ +resource "google_backup_dr_backup_vault" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + location = "us-central1" + backup_vault_id = "<%= ctx[:vars]['backup_vault_id'] %>" + description = "This is a second backup vault built by Terraform." 
+ backup_minimum_enforced_retention_duration = "100000s" + labels = { + foo = "bar1" + bar = "baz1" + } + annotations = { + annotations1 = "bar1" + annotations2 = "baz1" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_vault_test.go.erb b/mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_vault_test.go.erb new file mode 100644 index 000000000000..b6975f2de37a --- /dev/null +++ b/mmv1/third_party/terraform/services/backupdr/resource_backup_dr_backup_vault_test.go.erb @@ -0,0 +1,98 @@ +<% autogen_exception -%> +package backupdr_test +<% unless version == 'ga' -%> + +import ( + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" + "time" +) + +func TestAccBackupDRBackupVault_fullUpdate(t *testing.T) { + t.Parallel() + + timeNow := time.Now().UTC() + referenceTime := time.Date(timeNow.Year(), timeNow.Month(), timeNow.Day(), 0, 0, 0, 0, time.UTC) + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "effective_time": referenceTime.Add(24 * time.Hour).Format(time.RFC3339), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccBackupDRBackupVault_fullCreate(context), + }, + { + ResourceName: "google_backup_dr_backup_vault.backup-vault-test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"allow_missing", "annotations", "backup_vault_id", "force_delete", "force_update", "labels", "location", "terraform_labels"}, + }, + { + Config: testAccBackupDRBackupVault_fullUpdate(context), + }, + { + 
ResourceName: "google_backup_dr_backup_vault.backup-vault-test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"allow_missing", "annotations", "backup_vault_id", "force_delete", "force_update", "labels", "location", "terraform_labels"}, + }, + }, + }) +} + +func testAccBackupDRBackupVault_fullCreate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_backup_dr_backup_vault" "backup-vault-test" { + provider = google-beta + location = "us-central1" + backup_vault_id = "tf-test-backup-vault-test%{random_suffix}" + description = "This is a backup vault built by Terraform." + backup_minimum_enforced_retention_duration = "100000s" + effective_time = "%{effective_time}" + labels = { + foo = "bar" + bar = "baz" + } + annotations = { + annotations1 = "bar" + annotations2 = "baz" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} +`, context) +} + +func testAccBackupDRBackupVault_fullUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_backup_dr_backup_vault" "backup-vault-test" { + provider = google-beta + location = "us-central1" + backup_vault_id = "tf-test-backup-vault-test%{random_suffix}" + description = "This is a second backup vault built by Terraform." 
+ backup_minimum_enforced_retention_duration = "200000s" + effective_time = "%{effective_time}" + labels = { + foo = "bar1" + bar = "baz1" + } + annotations = { + annotations1 = "bar1" + annotations2 = "baz1" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} +`, context) +} +<% end -%> \ No newline at end of file From 692bae292bf8c33f8443e60cc53bab800b4821a2 Mon Sep 17 00:00:00 2001 From: Lauren Huang Date: Mon, 2 Sep 2024 04:50:11 -0400 Subject: [PATCH 12/60] Add support for setting Pub/Sub Cloud Storage subscription max_messages and use_topic_schema (#11583) --- mmv1/products/pubsub/Subscription.yaml | 8 ++ ...bsub_subscription_push_cloudstorage.tf.erb | 1 + ...subscription_push_cloudstorage_avro.tf.erb | 2 + .../resource_pubsub_subscription_test.go | 89 ++++++++++++++----- 4 files changed, 80 insertions(+), 20 deletions(-) diff --git a/mmv1/products/pubsub/Subscription.yaml b/mmv1/products/pubsub/Subscription.yaml index ed39701ebc28..e97292922ada 100644 --- a/mmv1/products/pubsub/Subscription.yaml +++ b/mmv1/products/pubsub/Subscription.yaml @@ -212,6 +212,10 @@ properties: description: | The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. The maxBytes limit may be exceeded in cases where messages are larger than the limit. + - !ruby/object:Api::Type::Integer + name: 'maxMessages' + description: | + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. - !ruby/object:Api::Type::Enum name: 'state' description: | @@ -230,6 +234,10 @@ properties: name: 'writeMetadata' description: | When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. + - !ruby/object:Api::Type::Boolean + name: 'useTopicSchema' + description: | + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. 
- !ruby/object:Api::Type::String name: 'serviceAccountEmail' description: | diff --git a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb index e95f12169611..2f000e5adcf7 100644 --- a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb +++ b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage.tf.erb @@ -21,6 +21,7 @@ resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { max_bytes = 1000 max_duration = "300s" + max_messages = 1000 } depends_on = [ google_storage_bucket.<%= ctx[:primary_resource_id] %>, diff --git a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb index b26352b1483e..3a61c489517e 100644 --- a/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb +++ b/mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_avro.tf.erb @@ -21,9 +21,11 @@ resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { max_bytes = 1000 max_duration = "300s" + max_messages = 1000 avro_config { write_metadata = true + use_topic_schema = true } } depends_on = [ diff --git a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go index fa98e1d0d51f..af40a3411333 100644 --- a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go +++ b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go @@ -250,7 +250,7 @@ func TestAccPubsubSubscriptionBigQuery_serviceAccount(t *testing.T) { }) } -func TestAccPubsubSubscriptionCloudStorage_update(t *testing.T) { +func TestAccPubsubSubscriptionCloudStorage_updateText(t *testing.T) { t.Parallel() bucket := 
fmt.Sprintf("tf-test-bucket-%s", acctest.RandString(t, 10)) @@ -263,7 +263,7 @@ func TestAccPubsubSubscriptionCloudStorage_update(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", ""), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", 0, "", "text"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -272,7 +272,41 @@ func TestAccPubsubSubscriptionCloudStorage_update(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s", ""), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s", 1000, "", "text"), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPubsubSubscriptionCloudStorage_updateAvro(t *testing.T) { + t.Parallel() + + bucket := fmt.Sprintf("tf-test-bucket-%s", acctest.RandString(t, 10)) + topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + subscriptionShort := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", 0, "", "avro"), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s", 1000, "", "avro"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -297,7 +331,7 @@ func TestAccPubsubSubscriptionCloudStorage_serviceAccount(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", "gcs-test-sa"), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", 0, "gcs-test-sa", "text"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -306,7 +340,7 @@ func TestAccPubsubSubscriptionCloudStorage_serviceAccount(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s", ""), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s", 1000, "", "text"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -315,7 +349,7 @@ func TestAccPubsubSubscriptionCloudStorage_serviceAccount(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", "gcs-test-sa2"), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", 0, "gcs-test-sa2", "avro"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -597,10 +631,10 @@ resource "google_pubsub_subscription" "foo" { } func testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscription string, useTableSchema bool, serviceAccountId string) string { - serivceAccountEmailField := "" - serivceAccountResource := "" + serviceAccountEmailField := "" + serviceAccountResource := "" if serviceAccountId != "" 
{ - serivceAccountResource = fmt.Sprintf(` + serviceAccountResource = fmt.Sprintf(` resource "google_service_account" "bq_write_service_account" { account_id = "%s" display_name = "BQ Write Service Account" @@ -617,9 +651,9 @@ resource "google_project_iam_member" "editor" { role = "roles/bigquery.dataEditor" member = "serviceAccount:${google_service_account.bq_write_service_account.email}" }`, serviceAccountId) - serivceAccountEmailField = "service_account_email = google_service_account.bq_write_service_account.email" + serviceAccountEmailField = "service_account_email = google_service_account.bq_write_service_account.email" } else { - serivceAccountResource = fmt.Sprintf(` + serviceAccountResource = fmt.Sprintf(` resource "google_project_iam_member" "viewer" { project = data.google_project.project.project_id role = "roles/bigquery.metadataViewer" @@ -679,10 +713,10 @@ resource "google_pubsub_subscription" "foo" { google_project_iam_member.editor ] } - `, serivceAccountResource, dataset, table, topic, subscription, useTableSchema, serivceAccountEmailField) + `, serviceAccountResource, dataset, table, topic, subscription, useTableSchema, serviceAccountEmailField) } -func testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscription, filenamePrefix, filenameSuffix, filenameDatetimeFormat string, maxBytes int, maxDuration string, serviceAccountId string) string { +func testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscription, filenamePrefix, filenameSuffix, filenameDatetimeFormat string, maxBytes int, maxDuration string, maxMessages int, serviceAccountId, outputFormat string) string { filenamePrefixString := "" if filenamePrefix != "" { filenamePrefixString = fmt.Sprintf(`filename_prefix = "%s"`, filenamePrefix) @@ -703,11 +737,15 @@ func testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscription, fi if maxDuration != "" { maxDurationString = fmt.Sprintf(`max_duration = "%s"`, maxDuration) } + maxMessagesString := "" + if 
maxMessages != 0 { + maxMessagesString = fmt.Sprintf(`max_messages = %d`, maxMessages) + } - serivceAccountEmailField := "" - serivceAccountResource := "" + serviceAccountEmailField := "" + serviceAccountResource := "" if serviceAccountId != "" { - serivceAccountResource = fmt.Sprintf(` + serviceAccountResource = fmt.Sprintf(` resource "google_service_account" "storage_write_service_account" { account_id = "%s" display_name = "Write Service Account" @@ -724,14 +762,23 @@ resource "google_project_iam_member" "editor" { role = "roles/bigquery.dataEditor" member = "serviceAccount:${google_service_account.storage_write_service_account.email}" }`, serviceAccountId) - serivceAccountEmailField = "service_account_email = google_service_account.storage_write_service_account.email" + serviceAccountEmailField = "service_account_email = google_service_account.storage_write_service_account.email" } else { - serivceAccountResource = fmt.Sprintf(` + serviceAccountResource = fmt.Sprintf(` resource "google_storage_bucket_iam_member" "admin" { bucket = google_storage_bucket.test.name role = "roles/storage.admin" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" }`) + } + outputFormatString := "" + if outputFormat == "avro" { + outputFormatString = ` + avro_config { + write_metadata = true + use_topic_schema = true + } +` } return fmt.Sprintf(` data "google_project" "project" { } @@ -758,7 +805,9 @@ resource "google_pubsub_subscription" "foo" { %s %s %s - %s + %s + %s + %s } depends_on = [ @@ -766,7 +815,7 @@ resource "google_pubsub_subscription" "foo" { google_storage_bucket_iam_member.admin, ] } -`, bucket, serivceAccountResource, topic, subscription, filenamePrefixString, filenameSuffixString, filenameDatetimeString, maxBytesString, maxDurationString, serivceAccountEmailField) +`, bucket, serviceAccountResource, topic, subscription, filenamePrefixString, filenameSuffixString, filenameDatetimeString, maxBytesString, 
maxDurationString, maxMessagesString, serviceAccountEmailField, outputFormatString) } func testAccPubsubSubscription_topicOnly(topic string) string { From 2fde08a9f99b96341fc9e3008015905d72f7f912 Mon Sep 17 00:00:00 2001 From: harshitpatel-github Date: Mon, 2 Sep 2024 19:39:26 +0530 Subject: [PATCH 13/60] Redis Cluster maintenance policy and schedule addition (#11574) Co-authored-by: Harshit Patel --- mmv1/products/redis/Cluster.yaml | 118 ++++++++++++++++++ .../examples/redis_cluster_ha.tf.erb | 11 ++ .../redis_cluster_ha_single_zone.tf.erb | 11 ++ .../redis/resource_redis_cluster_test.go.erb | 55 ++++++-- 4 files changed, 184 insertions(+), 11 deletions(-) diff --git a/mmv1/products/redis/Cluster.yaml b/mmv1/products/redis/Cluster.yaml index c3a229daccd0..b4f30e868772 100644 --- a/mmv1/products/redis/Cluster.yaml +++ b/mmv1/products/redis/Cluster.yaml @@ -276,3 +276,121 @@ properties: Configure Redis Cluster behavior using a subset of native Redis configuration parameters. Please check Memorystore documentation for the list of supported parameters: https://cloud.google.com/memorystore/docs/cluster/supported-instance-configurations + - !ruby/object:Api::Type::NestedObject + name: maintenancePolicy + description: Maintenance policy for a cluster + properties: + - !ruby/object:Api::Type::String + name: 'createTime' + output: true + description: | + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + - !ruby/object:Api::Type::String + name: 'updateTime' + output: true + description: | + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + - !ruby/object:Api::Type::Array + name: 'weeklyMaintenanceWindow' + description: | + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. 
For the current version, the maximum number + of weekly_window is expected to be one. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: 'day' + required: true + description: | + Required. The day of week that maintenance updates occur. + + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + values: + - :DAY_OF_WEEK_UNSPECIFIED + - :MONDAY + - :TUESDAY + - :WEDNESDAY + - :THURSDAY + - :FRIDAY + - :SATURDAY + - :SUNDAY + - !ruby/object:Api::Type::String + name: 'duration' + output: true + description: | + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + - !ruby/object:Api::Type::NestedObject + name: 'startTime' + required: true + allow_empty_object: true + send_empty_value: true + description: | + Required. Start time of the window in UTC time. + properties: + - !ruby/object:Api::Type::Integer + name: 'hours' + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.IntBetween(0,23)' + - !ruby/object:Api::Type::Integer + name: 'minutes' + description: | + Minutes of hour of day. Must be from 0 to 59. + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.IntBetween(0,59)' + - !ruby/object:Api::Type::Integer + name: 'seconds' + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. 
+ validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.IntBetween(0,60)' + - !ruby/object:Api::Type::Integer + name: 'nanos' + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.IntBetween(0,999999999)' + - !ruby/object:Api::Type::NestedObject + name: maintenanceSchedule + output: true + description: Upcoming maintenance schedule. + properties: + - !ruby/object:Api::Type::String + name: 'startTime' + output: true + description: | + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + - !ruby/object:Api::Type::String + name: 'endTime' + output: true + description: | + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + - !ruby/object:Api::Type::String + name: 'scheduleDeadlineTime' + output: true + description: | + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. 
diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb b/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb index 0ea4d32b4e28..4b6094c7e203 100644 --- a/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb +++ b/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb @@ -17,6 +17,17 @@ resource "google_redis_cluster" "<%= ctx[:primary_resource_id] %>" { zone_distribution_config { mode = "MULTI_ZONE" } + maintenance_policy { + weekly_maintenance_window { + day = "MONDAY" + start_time { + hours = 1 + minutes = 0 + seconds = 0 + nanos = 0 + } + } + } depends_on = [ google_network_connectivity_service_connection_policy.default ] diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb index 8ff059978093..1e3eb14dd2ef 100644 --- a/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb +++ b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb @@ -9,6 +9,17 @@ resource "google_redis_cluster" "<%= ctx[:primary_resource_id] %>" { mode = "SINGLE_ZONE" zone = "us-central1-f" } + maintenance_policy { + weekly_maintenance_window { + day = "MONDAY" + start_time { + hours = 1 + minutes = 0 + seconds = 0 + nanos = 0 + } + } + } deletion_protection_enabled = <%= ctx[:vars]['deletion_protection_enabled'] == 'true' %> depends_on = [ google_network_connectivity_service_connection_policy.default diff --git a/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb b/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb index 682d806d5448..2a356c798282 100644 --- a/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb @@ -24,7 +24,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: 
createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "TUESDAY", maintenanceHours: 2, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -34,7 +34,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "TUESDAY", maintenanceHours: 2, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -83,7 +83,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -93,7 +93,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 2 - Config: 
createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -103,7 +103,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 0 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -113,7 +113,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -132,7 +132,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with shard count 3 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: 
createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -142,7 +142,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // update shard count to 5 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -152,7 +152,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -175,6 +175,11 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", + maintenanceDay: "MONDAY", + maintenanceHours: 1, + maintenanceMinutes: 0, + maintenanceSeconds: 0, + maintenanceNanos: 0, redisConfigs: map[string]string{ "maxmemory-policy": "volatile-ttl", }}), @@ -191,6 +196,11 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", + maintenanceDay: 
"MONDAY", + maintenanceHours: 1, + maintenanceMinutes: 0, + maintenanceSeconds: 0, + maintenanceNanos: 0, redisConfigs: map[string]string{ "maxmemory-policy": "allkeys-lru", "maxmemory-clients": "90%", @@ -204,7 +214,7 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { }, { // remove all redis configs - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, @@ -260,6 +270,11 @@ type ClusterParams struct { redisConfigs map[string]string zoneDistributionMode string zone string + maintenanceDay string + maintenanceHours int + maintenanceMinutes int + maintenanceSeconds int + maintenanceNanos int } func createOrUpdateRedisCluster(params *ClusterParams) string { @@ -278,6 +293,23 @@ func createOrUpdateRedisCluster(params *ClusterParams) string { `, params.zoneDistributionMode, params.zone) } + maintenancePolicyBlock := `` + if params.maintenanceDay != "" { + maintenancePolicyBlock = fmt.Sprintf(` + maintenance_policy { + weekly_maintenance_window { + day = "%s" + start_time { + hours = %d + minutes = %d + seconds = %d + nanos = %d + } + } + } + `, params.maintenanceDay, params.maintenanceHours, params.maintenanceMinutes, params.maintenanceSeconds, params.maintenanceNanos) + } + return fmt.Sprintf(` resource "google_redis_cluster" "test" { provider = google-beta @@ -293,6 +325,7 @@ resource "google_redis_cluster" "test" { redis_configs = { %s } + %s %s depends_on = [ google_network_connectivity_service_connection_policy.default @@ -324,7 +357,7 @@ resource "google_compute_network" "producer_net" { name = "%s" auto_create_subnetworks = false } -`, params.name, params.replicaCount, params.shardCount, params.nodeType, params.deletionProtectionEnabled, 
strBuilder.String(), zoneDistributionConfigBlock, params.name, params.name, params.name) +`, params.name, params.replicaCount, params.shardCount, params.nodeType, params.deletionProtectionEnabled, strBuilder.String(), zoneDistributionConfigBlock, maintenancePolicyBlock, params.name, params.name, params.name) } <% end -%> From e30f86c3411a5ab264ae91b1afb57e1a3c7531e6 Mon Sep 17 00:00:00 2001 From: Patrick Rauchfuss Date: Tue, 3 Sep 2024 17:56:14 +0200 Subject: [PATCH 14/60] `google_network_connectivity_spoke`: add `include_export_ranges` (#11609) --- mmv1/products/networkconnectivity/Spoke.yaml | 6 ++++++ mmv1/products/networkconnectivity/go_Spoke.yaml | 6 ++++++ ...tivity_spoke_linked_vpc_network_basic.tf.tmpl | 4 ++++ ...ctivity_spoke_linked_vpc_network_basic.tf.erb | 4 ++++ .../resource_network_connectivity_spoke_test.go | 16 ++++++++++++++++ .../samples/spoke/linked_vpc_network.tf.tmpl | 4 ++++ 6 files changed, 40 insertions(+) diff --git a/mmv1/products/networkconnectivity/Spoke.yaml b/mmv1/products/networkconnectivity/Spoke.yaml index 2c71d8359330..f6c1378783e5 100644 --- a/mmv1/products/networkconnectivity/Spoke.yaml +++ b/mmv1/products/networkconnectivity/Spoke.yaml @@ -194,6 +194,12 @@ properties: min_version: ga immutable: true item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: includeExportRanges + description: IP ranges allowed to be included from peering. + min_version: ga + immutable: true + item_type: Api::Type::String - !ruby/object:Api::Type::String name: uniqueId description: Output only. The Google-generated UUID for the spoke. This value is unique across all spoke resources. If a spoke is deleted and another with the same name is created, the new spoke is assigned a different unique_id. 
diff --git a/mmv1/products/networkconnectivity/go_Spoke.yaml b/mmv1/products/networkconnectivity/go_Spoke.yaml index a59e0fbfffe5..6913716512cb 100644 --- a/mmv1/products/networkconnectivity/go_Spoke.yaml +++ b/mmv1/products/networkconnectivity/go_Spoke.yaml @@ -189,6 +189,12 @@ properties: immutable: true item_type: type: String + - name: 'includeExportRanges' + type: Array + description: IP ranges allowed to be included from peering. + immutable: true + item_type: + type: String - name: 'uniqueId' type: String description: Output only. The Google-generated UUID for the spoke. This value is unique across all spoke resources. If a spoke is deleted and another with the same name is created, the new spoke is assigned a different unique_id. diff --git a/mmv1/templates/terraform/examples/go/network_connectivity_spoke_linked_vpc_network_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_connectivity_spoke_linked_vpc_network_basic.tf.tmpl index 52bc4184d109..c53105428404 100644 --- a/mmv1/templates/terraform/examples/go/network_connectivity_spoke_linked_vpc_network_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_connectivity_spoke_linked_vpc_network_basic.tf.tmpl @@ -24,6 +24,10 @@ resource "google_network_connectivity_spoke" "{{$.PrimaryResourceId}}" { "198.51.100.0/24", "10.10.0.0/16" ] + include_export_ranges = [ + "198.51.100.0/23", + "10.0.0.0/8" + ] uri = google_compute_network.network.self_link } } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/network_connectivity_spoke_linked_vpc_network_basic.tf.erb b/mmv1/templates/terraform/examples/network_connectivity_spoke_linked_vpc_network_basic.tf.erb index 2e55da144ae6..7b95e795ad74 100644 --- a/mmv1/templates/terraform/examples/network_connectivity_spoke_linked_vpc_network_basic.tf.erb +++ b/mmv1/templates/terraform/examples/network_connectivity_spoke_linked_vpc_network_basic.tf.erb @@ -24,6 +24,10 @@ resource "google_network_connectivity_spoke" "<%= 
ctx[:primary_resource_id] %>" "198.51.100.0/24", "10.10.0.0/16" ] + include_export_ranges = [ + "198.51.100.0/23", + "10.0.0.0/8" + ] uri = google_compute_network.network.self_link } } \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/networkconnectivity/resource_network_connectivity_spoke_test.go b/mmv1/third_party/terraform/services/networkconnectivity/resource_network_connectivity_spoke_test.go index 0daa74a35917..91f7956ba370 100644 --- a/mmv1/third_party/terraform/services/networkconnectivity/resource_network_connectivity_spoke_test.go +++ b/mmv1/third_party/terraform/services/networkconnectivity/resource_network_connectivity_spoke_test.go @@ -180,6 +180,10 @@ resource "google_network_connectivity_spoke" "primary" { "198.51.100.0/24", "10.10.0.0/16" ] + include_export_ranges = [ + "198.51.100.0/23", + "10.0.0.0/8" + ] uri = google_compute_network.network.self_link } } @@ -215,6 +219,10 @@ resource "google_network_connectivity_spoke" "primary" { "198.51.100.0/24", "10.10.0.0/16" ] + include_export_ranges = [ + "198.51.100.0/23", + "10.0.0.0/8" + ] uri = google_compute_network.network.self_link } } @@ -375,6 +383,10 @@ resource "google_network_connectivity_spoke" "primary" { "198.51.100.0/24", "10.10.0.0/16" ] + include_export_ranges = [ + "198.51.100.0/23", + "10.0.0.0/8" + ] uri = google_compute_network.network.self_link } } @@ -410,6 +422,10 @@ resource "google_network_connectivity_spoke" "primary" { "198.51.100.0/24", "10.10.0.0/16" ] + include_export_ranges = [ + "198.51.100.0/23", + "10.0.0.0/8" + ] uri = google_compute_network.network.self_link } } diff --git a/tpgtools/overrides/networkconnectivity/samples/spoke/linked_vpc_network.tf.tmpl b/tpgtools/overrides/networkconnectivity/samples/spoke/linked_vpc_network.tf.tmpl index 8a1bd423b3bb..32d52539aa15 100644 --- a/tpgtools/overrides/networkconnectivity/samples/spoke/linked_vpc_network.tf.tmpl +++ b/tpgtools/overrides/networkconnectivity/samples/spoke/linked_vpc_network.tf.tmpl 
@@ -25,6 +25,10 @@ resource "google_network_connectivity_spoke" "primary" { "198.51.100.0/24", "10.10.0.0/16" ] + include_export_ranges = [ + "198.51.100.0/23", + "10.0.0.0/8" + ] uri = google_compute_network.network.self_link } } \ No newline at end of file From a4183752c8464e20aa4055ed0b0520f309a09904 Mon Sep 17 00:00:00 2001 From: karolgorc Date: Tue, 3 Sep 2024 18:14:24 +0200 Subject: [PATCH 15/60] Validate that `subnetwork_project` should match with the project in `subnetwork` field in `google_compute_instance` resource (#11537) --- .../compute/resource_compute_instance.go.erb | 31 ++++++++++ .../resource_compute_instance_test.go.erb | 56 +++++++++++++++++++ .../tpgresource/self_link_helpers.go | 11 ++++ .../tpgresource/self_link_helpers_test.go | 12 ++++ .../docs/r/compute_instance.html.markdown | 2 +- 5 files changed, 111 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 9d3c847b5931..6e6de85288d1 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -99,6 +99,36 @@ var ( } ) +// This checks if the project provided in subnetwork's self_link matches +// the project provided in subnetwork_project not to produce a confusing plan diff. 
+func validateSubnetworkProject(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return ValidateSubnetworkProjectFunc(d) +} + +func ValidateSubnetworkProjectFunc(d tpgresource.TerraformResourceDiff) error { + oldCount, newCount := d.GetChange("network_interface.#") + if oldCount.(int) != newCount.(int) { + return nil + } + for i := 0; i < newCount.(int); i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + subnetworkProject := d.Get(prefix + ".subnetwork_project") + subnetwork := d.Get(prefix + ".subnetwork") + + _, err := tpgresource.GetRelativePath(subnetwork.(string)) + if err != nil { + log.Printf("[DEBUG] Subnetwork %q is not a selflink", subnetwork) + return nil + } + + if tpgresource.GetProjectFromRegionalSelfLink(subnetwork.(string)) != subnetworkProject.(string) { + return fmt.Errorf("project in subnetwork's self_link %q must match subnetwork_project %q", subnetwork, subnetworkProject) + } + } + return nil +} + // network_interface.[d].network_ip can only change when subnet/network // is also changing. Validate that if network_ip is changing this scenario // holds up to par. 
@@ -1224,6 +1254,7 @@ be from 0 to 999,999,999 inclusive.`, suppressEmptyGuestAcceleratorDiff, ), desiredStatusDiff, + validateSubnetworkProject, forceNewIfNetworkIPNotUpdatable, tpgresource.SetLabelsDiff, ), diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index ee355c7f988d..7b7ce8bd535a 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -2687,6 +2687,23 @@ func TestAccComputeInstance_subnetworkUpdate(t *testing.T) { }) } +func TestAccComputeInstance_subnetworkProjectMustMatchError(t *testing.T) { + t.Parallel() + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_subnetworkProjectExpectError(suffix, instanceName), + ExpectError: regexp.MustCompile("must match subnetwork_project"), + }, + }, + }) +} + func TestAccComputeInstance_networkIpUpdate(t *testing.T) { t.Parallel() @@ -8695,6 +8712,45 @@ func testAccComputeInstance_subnetworkUpdateTwo(suffix, instance string) string `, suffix, suffix, suffix, suffix, instance) } +func testAccComputeInstance_subnetworkProjectExpectError(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = 
"tf-test-compute-subnet-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + subnetwork_project = "placeholder" + } + } +`, suffix, suffix, instance) +} + func testAccComputeInstance_networkIpUpdate(suffix, instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git a/mmv1/third_party/terraform/tpgresource/self_link_helpers.go b/mmv1/third_party/terraform/tpgresource/self_link_helpers.go index 91c5eefad548..5e28a562a2d9 100644 --- a/mmv1/third_party/terraform/tpgresource/self_link_helpers.go +++ b/mmv1/third_party/terraform/tpgresource/self_link_helpers.go @@ -170,3 +170,14 @@ func GetRegionFromRegionalSelfLink(selfLink string) string { } return selfLink } + +func GetProjectFromRegionalSelfLink(selfLink string) string { + re := regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/[a-zA-Z0-9-]*") + switch { + case re.MatchString(selfLink): + if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { + return res[1] + } + } + return selfLink +} diff --git a/mmv1/third_party/terraform/tpgresource/self_link_helpers_test.go b/mmv1/third_party/terraform/tpgresource/self_link_helpers_test.go index 8525ae3311d0..d640202c8b52 100644 --- a/mmv1/third_party/terraform/tpgresource/self_link_helpers_test.go +++ b/mmv1/third_party/terraform/tpgresource/self_link_helpers_test.go @@ -122,3 +122,15 @@ func TestGetRegionFromRegionalSelfLink(t *testing.T) { } } } + +func TestGetProjectFromRegionalSelfLink(t *testing.T) { + cases := map[string]string{ + "projects/foo/locations/europe-north1/datasets/bar/operations/foobar": 
"foo", + "projects/REDACTED/regions/europe-north1/subnetworks/tf-test-net-xbwhsmlfm8": "REDACTED", + } + for input, expected := range cases { + if result := GetProjectFromRegionalSelfLink(input); result != expected { + t.Errorf("expected to get %q from %q, got %q", expected, input, result) + } + } +} diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index cc01828f3c81..20bd5f0262bf 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -372,7 +372,7 @@ is desired, you will need to modify your state file manually using * `subnetwork_project` - (Optional) The project in which the subnetwork belongs. - If the `subnetwork` is a self_link, this field is ignored in favor of the project + If the `subnetwork` is a self_link, this field is set to the project defined in the subnetwork self_link. If the `subnetwork` is a name and this field is not provided, the provider project is used. 
From 94f1701838f6a6686e639d13fb2b75d7254a65a0 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Tue, 3 Sep 2024 09:23:55 -0700 Subject: [PATCH 16/60] Final* go rewrite services (#11593) --- mmv1/products/activedirectory/go_Domain.yaml | 2 +- mmv1/products/compute/go_ForwardingRule.yaml | 2 +- mmv1/products/dlp/go_DiscoveryConfig.yaml | 1 + ...iveDirectory.yaml => ActiveDirectory.yaml} | 2 +- mmv1/products/netapp/Backup.yaml | 2 +- .../{backupPolicy.yaml => BackupPolicy.yaml} | 2 +- .../{backupVault.yaml => BackupVault.yaml} | 2 +- .../{storagePool.yaml => StoragePool.yaml} | 2 +- .../netapp/{volume.yaml => Volume.yaml} | 0 ...Directory.yaml => go_ActiveDirectory.yaml} | 3 +- .../netapp/{go_backup.yaml => go_Backup.yaml} | 3 +- ...backupPolicy.yaml => go_BackupPolicy.yaml} | 3 +- ...o_backupVault.yaml => go_BackupVault.yaml} | 3 +- ...o_storagePool.yaml => go_StoragePool.yaml} | 3 +- .../products/netapp/go_VolumeReplication.yaml | 4 +- mmv1/products/netapp/go_VolumeSnapshot.yaml | 1 + mmv1/products/netapp/go_kmsconfig.yaml | 1 + mmv1/products/notebooks/Location.yaml | 1 - mmv1/products/osconfig/go_GuestPolicies.yaml | 888 +++++++++++++++++ .../products/osconfig/go_PatchDeployment.yaml | 940 ++++++++++++++++++ mmv1/products/osconfig/go_product.yaml | 25 + mmv1/products/oslogin/go_SSHPublicKey.yaml | 73 ++ mmv1/products/oslogin/go_product.yaml | 23 + mmv1/products/parallelstore/Instance.yaml | 193 ++-- mmv1/products/parallelstore/go_Instance.yaml | 211 ++++ mmv1/products/parallelstore/go_product.yaml | 22 + .../privilegedaccessmanager/Entitlement.yaml | 58 +- .../go_Entitlement.yaml | 292 ++++++ .../privilegedaccessmanager/go_product.yaml | 36 + .../publicca/go_ExternalAccountKey.yaml | 81 ++ mmv1/products/publicca/go_product.yaml | 24 + mmv1/products/pubsublite/go_Reservation.yaml | 59 ++ mmv1/products/pubsublite/go_Subscription.yaml | 84 ++ mmv1/products/pubsublite/go_Topic.yaml | 117 +++ mmv1/products/pubsublite/go_product.yaml | 23 + 
mmv1/products/redis/go_Cluster.yaml | 285 ++++++ mmv1/products/redis/go_Instance.yaml | 552 ++++++++++ mmv1/products/redis/go_product.yaml | 30 + mmv1/products/resourcemanager/go_Lien.yaml | 95 ++ mmv1/products/resourcemanager/go_product.yaml | 22 + mmv1/products/runtimeconfig/go_Config.yaml | 57 ++ mmv1/products/runtimeconfig/go_product.yaml | 23 + mmv1/products/secretmanager/go_Secret.yaml | 250 +++++ .../secretmanager/go_SecretVersion.yaml | 139 +++ mmv1/products/secretmanager/go_product.yaml | 24 + .../securesourcemanager/go_Instance.yaml | 222 +++++ .../securesourcemanager/go_Repository.yaml | 159 +++ .../securesourcemanager/go_product.yaml | 22 + .../vmwareengine/go_ExternalAccessRule.yaml | 1 + .../vmwareengine/go_ExternalAddress.yaml | 2 +- .../go/netapp_volume_replication.go.tmpl | 2 +- .../constants/go/notebooks_instance.go.tmpl | 3 +- .../netapp_volume_replication.go.erb | 2 +- .../go/parallelstore_instance_basic.tf.tmpl | 3 +- .../examples/go/redis_cluster_ha.tf.tmpl | 6 +- .../go/redis_cluster_ha_single_zone.tf.tmpl | 4 +- .../terraform/expand_property_method.go.tmpl | 2 +- ...app_volume_replication_post_create.go.tmpl | 2 +- ...tapp_volume_replication_post_create.go.erb | 2 +- ...pp_volume_replication_mirror_state.go.tmpl | 2 +- ...app_volume_replication_mirror_state.go.erb | 2 +- .../resource_netapp_storage_pool_test.go.tmpl | 30 +- ...esource_netapp_active_directory_sweeper.go | 6 +- .../resource_netapp_active_directory_test.go | 10 +- .../resource_netapp_backup_policy_sweeper.go | 6 +- .../resource_netapp_backup_policy_test.go | 16 +- .../netapp/resource_netapp_backup_sweeper.go | 6 +- .../netapp/resource_netapp_backup_test.go | 12 +- .../resource_netapp_backup_vault_sweeper.go | 6 +- .../resource_netapp_backup_vault_test.go | 14 +- .../resource_netapp_storage_pool_sweeper.go | 6 +- .../resource_netapp_storage_pool_test.go.erb | 30 +- ...resource_netapp_volume_replication_test.go | 18 +- .../netapp/resource_netapp_volume_test.go | 2 +- 
.../go/resource_redis_cluster_test.go.tmpl | 84 +- .../resource_google_project_service.go.tmpl | 6 + 76 files changed, 5103 insertions(+), 248 deletions(-) rename mmv1/products/netapp/{activeDirectory.yaml => ActiveDirectory.yaml} (99%) rename mmv1/products/netapp/{backupPolicy.yaml => BackupPolicy.yaml} (99%) rename mmv1/products/netapp/{backupVault.yaml => BackupVault.yaml} (99%) rename mmv1/products/netapp/{storagePool.yaml => StoragePool.yaml} (99%) rename mmv1/products/netapp/{volume.yaml => Volume.yaml} (100%) rename mmv1/products/netapp/{go_activeDirectory.yaml => go_ActiveDirectory.yaml} (99%) rename mmv1/products/netapp/{go_backup.yaml => go_Backup.yaml} (99%) rename mmv1/products/netapp/{go_backupPolicy.yaml => go_BackupPolicy.yaml} (99%) rename mmv1/products/netapp/{go_backupVault.yaml => go_BackupVault.yaml} (98%) rename mmv1/products/netapp/{go_storagePool.yaml => go_StoragePool.yaml} (99%) create mode 100644 mmv1/products/osconfig/go_GuestPolicies.yaml create mode 100644 mmv1/products/osconfig/go_PatchDeployment.yaml create mode 100644 mmv1/products/osconfig/go_product.yaml create mode 100644 mmv1/products/oslogin/go_SSHPublicKey.yaml create mode 100644 mmv1/products/oslogin/go_product.yaml create mode 100644 mmv1/products/parallelstore/go_Instance.yaml create mode 100644 mmv1/products/parallelstore/go_product.yaml create mode 100644 mmv1/products/privilegedaccessmanager/go_Entitlement.yaml create mode 100644 mmv1/products/privilegedaccessmanager/go_product.yaml create mode 100644 mmv1/products/publicca/go_ExternalAccountKey.yaml create mode 100644 mmv1/products/publicca/go_product.yaml create mode 100644 mmv1/products/pubsublite/go_Reservation.yaml create mode 100644 mmv1/products/pubsublite/go_Subscription.yaml create mode 100644 mmv1/products/pubsublite/go_Topic.yaml create mode 100644 mmv1/products/pubsublite/go_product.yaml create mode 100644 mmv1/products/redis/go_Cluster.yaml create mode 100644 mmv1/products/redis/go_Instance.yaml create mode 
100644 mmv1/products/redis/go_product.yaml create mode 100644 mmv1/products/resourcemanager/go_Lien.yaml create mode 100644 mmv1/products/resourcemanager/go_product.yaml create mode 100644 mmv1/products/runtimeconfig/go_Config.yaml create mode 100644 mmv1/products/runtimeconfig/go_product.yaml create mode 100644 mmv1/products/secretmanager/go_Secret.yaml create mode 100644 mmv1/products/secretmanager/go_SecretVersion.yaml create mode 100644 mmv1/products/secretmanager/go_product.yaml create mode 100644 mmv1/products/securesourcemanager/go_Instance.yaml create mode 100644 mmv1/products/securesourcemanager/go_Repository.yaml create mode 100644 mmv1/products/securesourcemanager/go_product.yaml diff --git a/mmv1/products/activedirectory/go_Domain.yaml b/mmv1/products/activedirectory/go_Domain.yaml index 07a88e9437f3..5e44da23b5f8 100644 --- a/mmv1/products/activedirectory/go_Domain.yaml +++ b/mmv1/products/activedirectory/go_Domain.yaml @@ -64,7 +64,7 @@ virtual_fields: When the field is set to false, deleting the domain is allowed. 
custom_code: custom_import: 'templates/terraform/custom_import/go/self_link_as_name.tmpl' - pre_delete: 'templates/terraform/pre_delete/go/active_directory_domain.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/active_directory_domain.go.tmpl' error_abort_predicates: - 'transport_tpg.Is429QuotaError' diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index b5844ad72776..c1976d6981b4 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -47,7 +47,7 @@ async: message: 'message' collection_url_key: 'items' custom_code: - pre_create: templates/terraform/pre_create/compute_forwarding_rule.go.tmpl + pre_create: 'templates/terraform/pre_create/go/compute_forwarding_rule.go.tmpl' constants: 'templates/terraform/constants/go/compute_forwarding_rule.go.tmpl' post_create: 'templates/terraform/post_create/go/labels.tmpl' custom_diff: diff --git a/mmv1/products/dlp/go_DiscoveryConfig.yaml b/mmv1/products/dlp/go_DiscoveryConfig.yaml index 51c0b5cf22e8..ff7fd3f887e3 100644 --- a/mmv1/products/dlp/go_DiscoveryConfig.yaml +++ b/mmv1/products/dlp/go_DiscoveryConfig.yaml @@ -32,6 +32,7 @@ timeouts: insert_minutes: 20 update_minutes: 20 delete_minutes: 20 +skip_sweeper: true custom_code: encoder: 'templates/terraform/encoders/go/wrap_object.go.tmpl' update_encoder: 'templates/terraform/encoders/go/wrap_object.go.tmpl' diff --git a/mmv1/products/netapp/activeDirectory.yaml b/mmv1/products/netapp/ActiveDirectory.yaml similarity index 99% rename from mmv1/products/netapp/activeDirectory.yaml rename to mmv1/products/netapp/ActiveDirectory.yaml index ea16ef1af690..168b46bd3477 100644 --- a/mmv1/products/netapp/activeDirectory.yaml +++ b/mmv1/products/netapp/ActiveDirectory.yaml @@ -12,7 +12,7 @@ # limitations under the License. 
--- !ruby/object:Api::Resource -name: 'activeDirectory' +name: 'ActiveDirectory' description: | ActiveDirectory is the public representation of the active directory config. references: !ruby/object:Api::Resource::ReferenceLinks diff --git a/mmv1/products/netapp/Backup.yaml b/mmv1/products/netapp/Backup.yaml index f25347450d20..94cc6c199024 100644 --- a/mmv1/products/netapp/Backup.yaml +++ b/mmv1/products/netapp/Backup.yaml @@ -12,7 +12,7 @@ # limitations under the License. --- !ruby/object:Api::Resource -name: 'backup' +name: 'Backup' description: | NetApp Volumes supports volume backups, which are copies of your volumes stored independently from the volume. Backups are stored in backup vaults, diff --git a/mmv1/products/netapp/backupPolicy.yaml b/mmv1/products/netapp/BackupPolicy.yaml similarity index 99% rename from mmv1/products/netapp/backupPolicy.yaml rename to mmv1/products/netapp/BackupPolicy.yaml index ff6490094dce..24dbf79f8c9a 100644 --- a/mmv1/products/netapp/backupPolicy.yaml +++ b/mmv1/products/netapp/BackupPolicy.yaml @@ -12,7 +12,7 @@ # limitations under the License. --- !ruby/object:Api::Resource -name: 'backupPolicy' +name: 'BackupPolicy' description: | A backup policy is used to schedule backups at regular daily, weekly, or monthly intervals. Backup policies allow you to attach a backup schedule to a volume. diff --git a/mmv1/products/netapp/backupVault.yaml b/mmv1/products/netapp/BackupVault.yaml similarity index 99% rename from mmv1/products/netapp/backupVault.yaml rename to mmv1/products/netapp/BackupVault.yaml index e8a1386aebc8..2fc45a3d4eb7 100644 --- a/mmv1/products/netapp/backupVault.yaml +++ b/mmv1/products/netapp/BackupVault.yaml @@ -12,7 +12,7 @@ # limitations under the License. --- !ruby/object:Api::Resource -name: 'backupVault' +name: 'BackupVault' description: | A backup vault is the location where backups are stored. You can only create one backup vault per region. 
A vault can hold multiple backups for multiple volumes in that region. diff --git a/mmv1/products/netapp/storagePool.yaml b/mmv1/products/netapp/StoragePool.yaml similarity index 99% rename from mmv1/products/netapp/storagePool.yaml rename to mmv1/products/netapp/StoragePool.yaml index 5e677b6ba2b4..aa0e10862fbe 100644 --- a/mmv1/products/netapp/storagePool.yaml +++ b/mmv1/products/netapp/StoragePool.yaml @@ -12,7 +12,7 @@ # limitations under the License. --- !ruby/object:Api::Resource -name: 'storagePool' +name: 'StoragePool' description: | Storage pools act as containers for volumes. All volumes in a storage pool share the following information: * Location diff --git a/mmv1/products/netapp/volume.yaml b/mmv1/products/netapp/Volume.yaml similarity index 100% rename from mmv1/products/netapp/volume.yaml rename to mmv1/products/netapp/Volume.yaml diff --git a/mmv1/products/netapp/go_activeDirectory.yaml b/mmv1/products/netapp/go_ActiveDirectory.yaml similarity index 99% rename from mmv1/products/netapp/go_activeDirectory.yaml rename to mmv1/products/netapp/go_ActiveDirectory.yaml index 2bd57619497c..4c4c661294f9 100644 --- a/mmv1/products/netapp/go_activeDirectory.yaml +++ b/mmv1/products/netapp/go_ActiveDirectory.yaml @@ -13,7 +13,7 @@ # Warning: This is a temporary file, and should not be edited directly --- -name: 'activeDirectory' +name: 'ActiveDirectory' description: | ActiveDirectory is the public representation of the active directory config. 
references: @@ -44,6 +44,7 @@ async: result: resource_inside_response: false custom_code: +skip_sweeper: true examples: - name: 'netapp_active_directory_full' primary_resource_id: 'test_active_directory_full' diff --git a/mmv1/products/netapp/go_backup.yaml b/mmv1/products/netapp/go_Backup.yaml similarity index 99% rename from mmv1/products/netapp/go_backup.yaml rename to mmv1/products/netapp/go_Backup.yaml index 601d4352d8ea..469509368af4 100644 --- a/mmv1/products/netapp/go_backup.yaml +++ b/mmv1/products/netapp/go_Backup.yaml @@ -13,7 +13,7 @@ # Warning: This is a temporary file, and should not be edited directly --- -name: 'backup' +name: 'Backup' description: | NetApp Volumes supports volume backups, which are copies of your volumes stored independently from the volume. Backups are stored in backup vaults, @@ -57,6 +57,7 @@ async: result: resource_inside_response: false custom_code: +skip_sweeper: true examples: - name: 'netapp_backup' primary_resource_id: 'test_backup' diff --git a/mmv1/products/netapp/go_backupPolicy.yaml b/mmv1/products/netapp/go_BackupPolicy.yaml similarity index 99% rename from mmv1/products/netapp/go_backupPolicy.yaml rename to mmv1/products/netapp/go_BackupPolicy.yaml index b32ae31f5bca..060aeaad0c27 100644 --- a/mmv1/products/netapp/go_backupPolicy.yaml +++ b/mmv1/products/netapp/go_BackupPolicy.yaml @@ -13,7 +13,7 @@ # Warning: This is a temporary file, and should not be edited directly --- -name: 'backupPolicy' +name: 'BackupPolicy' description: | A backup policy is used to schedule backups at regular daily, weekly, or monthly intervals. Backup policies allow you to attach a backup schedule to a volume. 
@@ -46,6 +46,7 @@ async: result: resource_inside_response: false custom_code: +skip_sweeper: true examples: - name: 'netapp_backup_policy_full' primary_resource_id: 'test_backup_policy_full' diff --git a/mmv1/products/netapp/go_backupVault.yaml b/mmv1/products/netapp/go_BackupVault.yaml similarity index 98% rename from mmv1/products/netapp/go_backupVault.yaml rename to mmv1/products/netapp/go_BackupVault.yaml index 90fa81e7a8c9..fa1e372107f6 100644 --- a/mmv1/products/netapp/go_backupVault.yaml +++ b/mmv1/products/netapp/go_BackupVault.yaml @@ -13,7 +13,7 @@ # Warning: This is a temporary file, and should not be edited directly --- -name: 'backupVault' +name: 'BackupVault' description: | A backup vault is the location where backups are stored. You can only create one backup vault per region. A vault can hold multiple backups for multiple volumes in that region. @@ -45,6 +45,7 @@ async: result: resource_inside_response: false custom_code: +skip_sweeper: true examples: - name: 'netapp_backup_vault' primary_resource_id: 'test_backup_vault' diff --git a/mmv1/products/netapp/go_storagePool.yaml b/mmv1/products/netapp/go_StoragePool.yaml similarity index 99% rename from mmv1/products/netapp/go_storagePool.yaml rename to mmv1/products/netapp/go_StoragePool.yaml index ae28940a1f56..1ad285e73aae 100644 --- a/mmv1/products/netapp/go_storagePool.yaml +++ b/mmv1/products/netapp/go_StoragePool.yaml @@ -13,7 +13,7 @@ # Warning: This is a temporary file, and should not be edited directly --- -name: 'storagePool' +name: 'StoragePool' description: | Storage pools act as containers for volumes. 
All volumes in a storage pool share the following information: * Location @@ -68,6 +68,7 @@ async: resource_inside_response: false custom_code: pre_update: 'templates/terraform/pre_update/go/netapp_storagepool.go.tmpl' +skip_sweeper: true examples: - name: 'Storage_pool_create' primary_resource_id: 'test_pool' diff --git a/mmv1/products/netapp/go_VolumeReplication.yaml b/mmv1/products/netapp/go_VolumeReplication.yaml index 5aa5b909ce30..3c38699de70e 100644 --- a/mmv1/products/netapp/go_VolumeReplication.yaml +++ b/mmv1/products/netapp/go_VolumeReplication.yaml @@ -204,12 +204,12 @@ properties: - name: 'transferBytes' type: String description: | - Number of bytes transferred so far in current transfer. + Cumulative bytes transferred so far for the replication relationship. output: true - name: 'totalTransferDuration' type: String description: | - Total time taken so far during current transfer. + Cumulative time taken across all transfers for the replication relationship. output: true - name: 'lastTransferBytes' type: String diff --git a/mmv1/products/netapp/go_VolumeSnapshot.yaml b/mmv1/products/netapp/go_VolumeSnapshot.yaml index 1959c9892ac2..9769e6106db3 100644 --- a/mmv1/products/netapp/go_VolumeSnapshot.yaml +++ b/mmv1/products/netapp/go_VolumeSnapshot.yaml @@ -45,6 +45,7 @@ async: result: resource_inside_response: false custom_code: +skip_sweeper: true examples: - name: 'volume_snapshot_create' primary_resource_id: 'test_snapshot' diff --git a/mmv1/products/netapp/go_kmsconfig.yaml b/mmv1/products/netapp/go_kmsconfig.yaml index f97c79ffc243..d04d08fda04d 100644 --- a/mmv1/products/netapp/go_kmsconfig.yaml +++ b/mmv1/products/netapp/go_kmsconfig.yaml @@ -46,6 +46,7 @@ async: resource_inside_response: false custom_code: post_create: 'templates/terraform/post_create/go/KMS_Verify.go.tmpl' +skip_sweeper: true examples: - name: 'kmsConfig_create' primary_resource_id: 'kmsConfig' diff --git a/mmv1/products/notebooks/Location.yaml 
b/mmv1/products/notebooks/Location.yaml index bed0b008c9bc..e2adea99a325 100644 --- a/mmv1/products/notebooks/Location.yaml +++ b/mmv1/products/notebooks/Location.yaml @@ -23,5 +23,4 @@ properties: - !ruby/object:Api::Type::String name: 'name' description: 'Name of the Location resource.' - custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb diff --git a/mmv1/products/osconfig/go_GuestPolicies.yaml b/mmv1/products/osconfig/go_GuestPolicies.yaml new file mode 100644 index 000000000000..d77ecbbab880 --- /dev/null +++ b/mmv1/products/osconfig/go_GuestPolicies.yaml @@ -0,0 +1,888 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'GuestPolicies' +description: | + An OS Config resource representing a guest configuration policy. These policies represent + the desired state for VM instance guest environments including packages to install or remove, + package repository configurations, and software to install. 
+min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/os-config-management' + api: 'https://cloud.google.com/compute/docs/osconfig/rest' +docs: +base_url: 'projects/{{project}}/guestPolicies' +self_link: 'projects/{{project}}/guestPolicies/{{guest_policy_id}}' +create_url: 'projects/{{project}}/guestPolicies?guestPolicyId={{guest_policy_id}}' +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - guestPolicyId +custom_code: + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' +examples: + - name: 'os_config_guest_policies_basic' + primary_resource_id: 'guest_policies' + vars: + instance_name: 'guest-policy-inst' + guest_policy_id: 'guest-policy' + ignore_read_extra: + - 'project' + - name: 'os_config_guest_policies_packages' + primary_resource_id: 'guest_policies' + vars: + guest_policy_id: 'guest-policy' + ignore_read_extra: + - 'project' + - name: 'os_config_guest_policies_recipes' + primary_resource_id: 'guest_policies' + vars: + guest_policy_id: 'guest-policy' + ignore_read_extra: + - 'project' +parameters: + - name: 'guestPolicyId' + type: String + description: | + The logical name of the guest policy in the project with the following restrictions: + * Must contain only lowercase letters, numbers, and hyphens. + * Must start with a letter. + * Must be between 1-63 characters. + * Must end with a number or a letter. + * Must be unique within the project. + min_version: 'beta' + url_param_only: true + required: true + validation: + regex: '(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))' +properties: + - name: 'name' + type: String + description: | + Unique name of the resource in this project using one of the following forms: projects/{project_number}/guestPolicies/{guestPolicyId}. 
+ min_version: 'beta' + output: true + - name: 'description' + type: String + description: | + Description of the guest policy. Length of the description is limited to 1024 characters. + min_version: 'beta' + - name: 'assignment' + type: NestedObject + description: | + Specifies the VM instances that are assigned to this policy. This allows you to target sets + or groups of VM instances by different parameters such as labels, names, OS, or zones. + If left empty, all VM instances underneath this policy are targeted. + At the same level in the resource hierarchy (that is within a project), the service prevents + the creation of multiple policies that conflict with each other. + For more information, see how the service + [handles assignment conflicts](https://cloud.google.com/compute/docs/os-config-management/create-guest-policy#handle-conflicts). + min_version: 'beta' + required: true + properties: + - name: 'groupLabels' + type: Array + description: | + Targets instances matching at least one of these label sets. This allows an assignment to target disparate groups, + for example "env=prod or env=staging". + min_version: 'beta' + at_least_one_of: + - 'assignment.0.group_labels' + - 'assignment.0.zones' + - 'assignment.0.instances' + - 'assignment.0.instance_name_prefixes' + - 'assignment.0.os_types' + item_type: + type: NestedObject + properties: + - name: 'labels' + type: KeyValuePairs + description: | + Google Compute Engine instance labels that must be present for an instance to be included in this assignment group. + min_version: 'beta' + required: true + - name: 'zones' + type: Array + description: | + Targets instances in any of these zones. Leave empty to target instances in any zone. + Zonal targeting is uncommon and is supported to facilitate the management of changes by zone. 
+ min_version: 'beta' + at_least_one_of: + - 'assignment.0.group_labels' + - 'assignment.0.zones' + - 'assignment.0.instances' + - 'assignment.0.instance_name_prefixes' + - 'assignment.0.os_types' + item_type: + type: String + - name: 'instances' + type: Array + description: | + Targets any of the instances specified. Instances are specified by their URI in the form + zones/[ZONE]/instances/[INSTANCE_NAME]. + Instance targeting is uncommon and is supported to facilitate the management of changes + by the instance or to target specific VM instances for development and testing. + Only supported for project-level policies and must reference instances within this project. + min_version: 'beta' + at_least_one_of: + - 'assignment.0.group_labels' + - 'assignment.0.zones' + - 'assignment.0.instances' + - 'assignment.0.instance_name_prefixes' + - 'assignment.0.os_types' + item_type: + type: String + - name: 'instanceNamePrefixes' + type: Array + description: | + Targets VM instances whose name starts with one of these prefixes. + Like labels, this is another way to group VM instances when targeting configs, + for example prefix="prod-". + Only supported for project-level policies. + min_version: 'beta' + at_least_one_of: + - 'assignment.0.group_labels' + - 'assignment.0.zones' + - 'assignment.0.instances' + - 'assignment.0.instance_name_prefixes' + - 'assignment.0.os_types' + item_type: + type: String + - name: 'osTypes' + type: Array + description: | + Targets VM instances matching at least one of the following OS types. + VM instances must match all supplied criteria for a given OsType to be included. 
+ min_version: 'beta' + at_least_one_of: + - 'assignment.0.group_labels' + - 'assignment.0.zones' + - 'assignment.0.instances' + - 'assignment.0.instance_name_prefixes' + - 'assignment.0.os_types' + item_type: + type: NestedObject + properties: + - name: 'osShortName' + type: String + description: | + Targets VM instances with OS Inventory enabled and having the following OS short name, for example "debian" or "windows". + min_version: 'beta' + - name: 'osVersion' + type: String + description: | + Targets VM instances with OS Inventory enabled and having the following following OS version. + min_version: 'beta' + - name: 'osArchitecture' + type: String + description: | + Targets VM instances with OS Inventory enabled and having the following OS architecture. + min_version: 'beta' + - name: 'packages' + type: Array + description: | + The software packages to be managed by this policy. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The name of the package. A package is uniquely identified for conflict validation + by checking the package name and the manager(s) that the package targets. + min_version: 'beta' + required: true + - name: 'desiredState' + type: Enum + description: | + The desiredState the agent should maintain for this package. The default is to ensure the package is installed. + min_version: 'beta' + enum_values: + - 'INSTALLED' + - 'UPDATED' + - 'REMOVED' + - name: 'manager' + type: Enum + description: | + Type of package manager that can be used to install this package. If a system does not have the package manager, + the package is not installed or removed no error message is returned. By default, or if you specify ANY, + the agent attempts to install and remove this package using the default package manager. + This is useful when creating a policy that applies to different types of systems. + The default behavior is ANY. 
+ min_version: 'beta' + default_value: "ANY" + enum_values: + - 'ANY' + - 'APT' + - 'YUM' + - 'ZYPPER' + - 'GOO' + - name: 'packageRepositories' + type: Array + description: | + A list of package repositories to configure on the VM instance. + This is done before any other configs are applied so they can use these repos. + Package repositories are only configured if the corresponding package manager(s) are available. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'apt' + type: NestedObject + description: | + An Apt Repository. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'archiveType' + type: Enum + description: | + Type of archive files in this repository. The default behavior is DEB. + min_version: 'beta' + default_value: "DEB" + enum_values: + - 'DEB' + - 'DEB_SRC' + - name: 'uri' + type: String + description: | + URI for this repository. + min_version: 'beta' + required: true + - name: 'distribution' + type: String + description: | + Distribution of this repository. + min_version: 'beta' + required: true + - name: 'components' + type: Array + description: | + List of components for this repository. Must contain at least one item. + min_version: 'beta' + required: true + item_type: + type: String + - name: 'gpgKey' + type: String + description: | + URI of the key file for this repository. The agent maintains a keyring at + /etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg containing all the keys in any applied guest policy. + min_version: 'beta' + - name: 'yum' + type: NestedObject + description: | + A Yum Repository. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'id' + type: String + description: | + A one word, unique name for this repository. 
This is the repo id in the Yum config file and also the displayName + if displayName is omitted. This id is also used as the unique identifier when checking for guest policy conflicts. + min_version: 'beta' + required: true + - name: 'displayName' + type: String + description: | + The display name of the repository. + min_version: 'beta' + - name: 'baseUrl' + type: String + description: | + The location of the repository directory. + min_version: 'beta' + required: true + - name: 'gpgKeys' + type: Array + description: | + URIs of GPG keys. + min_version: 'beta' + item_type: + type: String + - name: 'zypper' + type: NestedObject + description: | + A Zypper Repository. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'id' + type: String + description: | + A one word, unique name for this repository. This is the repo id in the zypper config file and also the displayName + if displayName is omitted. This id is also used as the unique identifier when checking for guest policy conflicts. + min_version: 'beta' + required: true + - name: 'displayName' + type: String + description: | + The display name of the repository. + min_version: 'beta' + - name: 'baseUrl' + type: String + description: | + The location of the repository directory. + min_version: 'beta' + required: true + - name: 'gpgKeys' + type: Array + description: | + URIs of GPG keys. + min_version: 'beta' + item_type: + type: String + - name: 'goo' + type: NestedObject + description: | + A Goo Repository. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'name' + type: String + description: | + The name of the repository. + min_version: 'beta' + required: true + - name: 'url' + type: String + description: | + The url of the repository. 
+ min_version: 'beta' + required: true + - name: 'recipes' + type: Array + description: | + A list of Recipes to install on the VM instance. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Unique identifier for the recipe. Only one recipe with a given name is installed on an instance. + Names are also used to identify resources which helps to determine whether guest policies have conflicts. + This means that requests to create multiple recipes with the same name and version are rejected since they + could potentially have conflicting assignments. + min_version: 'beta' + required: true + - name: 'version' + type: String + description: | + The version of this software recipe. Version can be up to 4 period separated numbers (e.g. 12.34.56.78). + min_version: 'beta' + - name: 'artifacts' + type: Array + description: | + Resources available to be used in the steps in the recipe. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'id' + type: String + description: | + Id of the artifact, which the installation and update steps of this recipe can reference. + Artifacts in a recipe cannot have the same id. + min_version: 'beta' + required: true + - name: 'allowInsecure' + type: Boolean + description: | + Defaults to false. When false, recipes are subject to validations based on the artifact type: + Remote: A checksum must be specified, and only protocols with transport-layer security are permitted. + GCS: An object generation number must be specified. + min_version: 'beta' + default_value: false + - name: 'remote' + type: NestedObject + description: | + A generic remote artifact. + # TODO (mbang): add `conflicts` when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'uri' + type: String + description: | + URI from which to fetch the object. 
It should contain both the protocol and path following the format {protocol}://{location}. + min_version: 'beta' + - name: 'checkSum' + type: String + description: | + Must be provided if allowInsecure is false. SHA256 checksum in hex format, to compare to the checksum of the artifact. + If the checksum is not empty and it doesn't match the artifact then the recipe installation fails before running any + of the steps. + min_version: 'beta' + - name: 'gcs' + type: NestedObject + description: | + A Google Cloud Storage artifact. + # TODO (mbang): add `conflicts` when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'bucket' + type: String + description: | + Bucket of the Google Cloud Storage object. Given an example URL: https://storage.googleapis.com/my-bucket/foo/bar#1234567 + this value would be my-bucket. + min_version: 'beta' + - name: 'object' + type: String + description: | + Name of the Google Cloud Storage object. Given an example URL: https://storage.googleapis.com/my-bucket/foo/bar#1234567 + this value would be foo/bar. + min_version: 'beta' + - name: 'generation' + type: Integer + description: | + Must be provided if allowInsecure is false. Generation number of the Google Cloud Storage object. + https://storage.googleapis.com/my-bucket/foo/bar#1234567 this value would be 1234567. + min_version: 'beta' + - name: 'installSteps' + type: Array + description: | + Actions to be taken for installing this recipe. On failure it stops executing steps and does not attempt another installation. + Any steps taken (including partially completed steps) are not rolled back. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'fileCopy' + type: NestedObject + description: | + Copies a file onto the instance. 
+ # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'destination' + type: String + description: | + The absolute path on the instance to put the file. + min_version: 'beta' + required: true + - name: 'overwrite' + type: Boolean + description: | + Whether to allow this step to overwrite existing files.If this is false and the file already exists the file + is not overwritten and the step is considered a success. Defaults to false. + min_version: 'beta' + default_value: false + - name: 'permissions' + type: String + description: | + Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users + for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit + number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one + bit corresponds to the execute permission. Default behavior is 755. + + Below are some examples of permissions and their associated values: + read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4 + min_version: 'beta' + - name: 'archiveExtraction' + type: NestedObject + description: | + Extracts an archive into the specified directory. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'destination' + type: String + description: | + Directory to extract archive to. Defaults to / on Linux or C:\ on Windows. 
+ min_version: 'beta' + default_from_api: true + - name: 'type' + type: Enum + description: | + The type of the archive to extract. + min_version: 'beta' + required: true + enum_values: + - 'TAR' + - 'TAR_GZIP' + - 'TAR_BZIP' + - 'TAR_LZMA' + - 'TAR_XZ' + - 'ZIP' + - name: 'msiInstallation' + type: NestedObject + description: | + Installs an MSI file. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'flags' + type: Array + description: | + The flags to use when installing the MSI. Defaults to the install flag. + min_version: 'beta' + default_from_api: true + item_type: + type: String + - name: 'allowedExitCodes' + type: Array + description: | + Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] + min_version: 'beta' + default_from_api: true + item_type: + type: Integer + - name: 'dpkgInstallation' + type: NestedObject + description: | + Installs a deb file via dpkg. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'rpmInstallation' + type: NestedObject + description: | + Installs an rpm file via the rpm utility. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. 
+ min_version: 'beta' + required: true + - name: 'fileExec' + type: NestedObject + description: | + Executes an artifact or local file. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'args' + type: Array + description: | + Arguments to be passed to the provided executable. + min_version: 'beta' + item_type: + type: String + - name: 'allowedExitCodes' + type: String + description: | + A list of possible return values that the program can return to indicate a success. Defaults to [0]. + min_version: 'beta' + default_from_api: true + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + - name: 'localPath' + type: String + description: | + The absolute path of the file on the local filesystem. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + - name: 'scriptRun' + type: NestedObject + description: | + Runs commands in a shell. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'script' + type: String + description: | + The shell script to be executed. + min_version: 'beta' + required: true + - name: 'allowedExitCodes' + type: Array + description: | + Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] + min_version: 'beta' + default_from_api: true + item_type: + type: Integer + - name: 'interpreter' + type: Enum + description: | + The script interpreter to use to run the script. 
If no interpreter is specified the script is executed directly, + which likely only succeed for scripts with shebang lines. + min_version: 'beta' + enum_values: + - 'SHELL' + - 'POWERSHELL' + - name: 'updateSteps' + type: Array + description: | + Actions to be taken for updating this recipe. On failure it stops executing steps and does not attempt another update for this recipe. + Any steps taken (including partially completed steps) are not rolled back. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'fileCopy' + type: NestedObject + description: | + Copies a file onto the instance. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'destination' + type: String + description: | + The absolute path on the instance to put the file. + min_version: 'beta' + required: true + - name: 'overwrite' + type: Boolean + description: | + Whether to allow this step to overwrite existing files.If this is false and the file already exists the file + is not overwritten and the step is considered a success. Defaults to false. + min_version: 'beta' + default_value: false + - name: 'permissions' + type: String + description: | + Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users + for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three bit + number with the 4 bit corresponding to the read permissions, the 2 bit corresponds to the write bit, and the one + bit corresponds to the execute permission. Default behavior is 755. 
+ + Below are some examples of permissions and their associated values: + read, write, and execute: 7 read and execute: 5 read and write: 6 read only: 4 + min_version: 'beta' + - name: 'archiveExtraction' + type: NestedObject + description: | + Extracts an archive into the specified directory. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'destination' + type: String + description: | + Directory to extract archive to. Defaults to / on Linux or C:\ on Windows. + min_version: 'beta' + default_from_api: true + - name: 'type' + type: Enum + description: | + The type of the archive to extract. + min_version: 'beta' + required: true + enum_values: + - 'TAR' + - 'TAR_GZIP' + - 'TAR_BZIP' + - 'TAR_LZMA' + - 'TAR_XZ' + - 'ZIP' + - name: 'msiInstallation' + type: NestedObject + description: | + Installs an MSI file. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'flags' + type: Array + description: | + The flags to use when installing the MSI. Defaults to the install flag. + min_version: 'beta' + default_from_api: true + item_type: + type: String + - name: 'allowedExitCodes' + type: Array + description: | + Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] + min_version: 'beta' + default_from_api: true + item_type: + type: Integer + - name: 'dpkgInstallation' + type: NestedObject + description: | + Installs a deb file via dpkg. 
+ # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'rpmInstallation' + type: NestedObject + description: | + Installs an rpm file via the rpm utility. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + min_version: 'beta' + required: true + - name: 'fileExec' + type: NestedObject + description: | + Executes an artifact or local file. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'args' + type: Array + description: | + Arguments to be passed to the provided executable. + min_version: 'beta' + item_type: + type: String + - name: 'allowedExitCodes' + type: Array + description: | + A list of possible return values that the program can return to indicate a success. Defaults to [0]. + min_version: 'beta' + default_from_api: true + item_type: + type: Integer + - name: 'artifactId' + type: String + description: | + The id of the relevant artifact in the recipe. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + - name: 'localPath' + type: String + description: | + The absolute path of the file on the local filesystem. + # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + - name: 'scriptRun' + type: NestedObject + description: | + Runs commands in a shell. 
+ # TODO (mbang): add exactly_one_of when it can be applied to lists (https://github.com/hashicorp/terraform-plugin-sdk/issues/470) + min_version: 'beta' + properties: + - name: 'script' + type: String + description: | + The shell script to be executed. + min_version: 'beta' + required: true + - name: 'allowedExitCodes' + type: Array + description: | + Return codes that indicate that the software installed or updated successfully. Behaviour defaults to [0] + min_version: 'beta' + default_from_api: true + item_type: + type: Integer + - name: 'interpreter' + type: Enum + description: | + The script interpreter to use to run the script. If no interpreter is specified the script is executed directly, + which likely only succeed for scripts with shebang lines. + min_version: 'beta' + enum_values: + - 'SHELL' + - 'POWERSHELL' + - name: 'desiredState' + type: Enum + description: | + Default is INSTALLED. The desired state the agent should maintain for this recipe. + + INSTALLED: The software recipe is installed on the instance but won't be updated to new versions. + INSTALLED_KEEP_UPDATED: The software recipe is installed on the instance. The recipe is updated to a higher version, + if a higher version of the recipe is assigned to this instance. + REMOVE: Remove is unsupported for software recipes and attempts to create or update a recipe to the REMOVE state is rejected. + min_version: 'beta' + default_value: "INSTALLED" + enum_values: + - 'INSTALLED' + - 'UPDATED' + - 'REMOVED' + - name: 'createTime' + type: String + description: | + Time this guest policy was created. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. + Example: "2014-10-02T15:01:23.045123456Z". + min_version: 'beta' + output: true + - name: 'updateTime' + type: String + description: | + Last time this guest policy was updated. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. + Example: "2014-10-02T15:01:23.045123456Z". 
+ min_version: 'beta' + output: true + - name: 'etag' + type: String + description: | + The etag for this guest policy. If this is provided on update, it must match the server's etag. + + min_version: 'beta' + default_from_api: true diff --git a/mmv1/products/osconfig/go_PatchDeployment.yaml b/mmv1/products/osconfig/go_PatchDeployment.yaml new file mode 100644 index 000000000000..17ae197d996e --- /dev/null +++ b/mmv1/products/osconfig/go_PatchDeployment.yaml @@ -0,0 +1,940 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PatchDeployment' +description: | + Patch deployments are configurations that individual patch jobs use to complete a patch. + These configurations include instance filter, package repository settings, and a schedule. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/os-patch-management' + api: 'https://cloud.google.com/compute/docs/osconfig/rest' +docs: +id_format: '{{name}}' +base_url: 'projects/{{project}}/patchDeployments' +self_link: '{{name}}' +create_url: 'projects/{{project}}/patchDeployments?patchDeploymentId={{patch_deployment_id}}' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + encoder: 'templates/terraform/encoders/go/os_config_patch_deployment.go.tmpl' + decoder: 'templates/terraform/decoders/go/os_config_patch_deployment.go.tmpl' + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' + custom_import: 'templates/terraform/custom_import/go/self_link_as_name.tmpl' +examples: + - name: 'os_config_patch_deployment_basic' + primary_resource_id: 'patch' + vars: + patch_deployment_id: 'patch-deploy' + - name: 'os_config_patch_deployment_daily' + primary_resource_id: 'patch' + vars: + patch_deployment_id: 'patch-deploy' + - name: 'os_config_patch_deployment_daily_midnight' + primary_resource_id: 'patch' + vars: + patch_deployment_id: 'patch-deploy' + - name: 'os_config_patch_deployment_instance' + primary_resource_id: 'patch' + vars: + instance_name: 'patch-deploy-inst' + patch_deployment_id: 'patch-deploy' + - name: 'os_config_patch_deployment_full' + primary_resource_id: 'patch' + vars: + patch_deployment_id: 'patch-deploy' +parameters: + - name: 'patchDeploymentId' + type: String + description: | + A name for the patch deployment in the project. When creating a name the following rules apply: + * Must contain only lowercase letters, numbers, and hyphens. + * Must start with a letter. + * Must be between 1-63 characters. + * Must end with a number or a letter. + * Must be unique within the project. 
+ url_param_only: true + required: true + validation: + regex: '(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))' +properties: + - name: 'name' + type: String + description: | + Unique name for the patch deployment resource in a project. + The patch deployment name is in the form: projects/{project_id}/patchDeployments/{patchDeploymentId}. + output: true + - name: 'description' + type: String + description: | + Description of the patch deployment. Length of the description is limited to 1024 characters. + - name: 'instanceFilter' + type: NestedObject + description: | + VM instances to patch. + required: true + properties: + - name: 'all' + type: Boolean + description: | + Target all VM instances in the project. If true, no other criteria is permitted. + at_least_one_of: + - 'instance_filter.0.all' + - 'instance_filter.0.group_labels' + - 'instance_filter.0.zones' + - 'instance_filter.0.instances' + - 'instance_filter.0.instance_name_prefixes' + - name: 'groupLabels' + type: Array + description: | + Targets VM instances matching ANY of these GroupLabels. This allows targeting of disparate groups of VM instances. + at_least_one_of: + - 'instance_filter.0.all' + - 'instance_filter.0.group_labels' + - 'instance_filter.0.zones' + - 'instance_filter.0.instances' + - 'instance_filter.0.instance_name_prefixes' + item_type: + type: NestedObject + properties: + - name: 'labels' + type: KeyValuePairs + description: | + Compute Engine instance labels that must be present for a VM instance to be targeted by this filter + required: true + - name: 'zones' + type: Array + description: | + Targets VM instances in ANY of these zones. Leave empty to target VM instances in any zone. 
+ at_least_one_of: + - 'instance_filter.0.all' + - 'instance_filter.0.group_labels' + - 'instance_filter.0.zones' + - 'instance_filter.0.instances' + - 'instance_filter.0.instance_name_prefixes' + item_type: + type: String + - name: 'instances' + type: Array + description: | + Targets any of the VM instances specified. Instances are specified by their URI in the `form zones/{{zone}}/instances/{{instance_name}}`, + `projects/{{project_id}}/zones/{{zone}}/instances/{{instance_name}}`, or + `https://www.googleapis.com/compute/v1/projects/{{project_id}}/zones/{{zone}}/instances/{{instance_name}}` + at_least_one_of: + - 'instance_filter.0.all' + - 'instance_filter.0.group_labels' + - 'instance_filter.0.zones' + - 'instance_filter.0.instances' + - 'instance_filter.0.instance_name_prefixes' + item_type: + type: String + - name: 'instanceNamePrefixes' + type: Array + description: | + Targets VMs whose name starts with one of these prefixes. Similar to labels, this is another way to group + VMs when targeting configs, for example prefix="prod-". + at_least_one_of: + - 'instance_filter.0.all' + - 'instance_filter.0.group_labels' + - 'instance_filter.0.zones' + - 'instance_filter.0.instances' + - 'instance_filter.0.instance_name_prefixes' + item_type: + type: String + - name: 'patchConfig' + type: NestedObject + description: | + Patch configuration that is applied. + properties: + - name: 'migInstancesAllowed' + type: Boolean + description: | + Allows the patch job to run on Managed instance groups (MIGs). + - name: 'rebootConfig' + type: Enum + description: | + Post-patch reboot settings. + at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + enum_values: + - 'DEFAULT' + - 'ALWAYS' + - 'NEVER' + - name: 'apt' + type: NestedObject + description: | + Apt update settings. 
Use this setting to override the default apt patch rules. + at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + properties: + - name: 'type' + type: Enum + description: | + By changing the type to DIST, the patching is performed using apt-get dist-upgrade instead. + at_least_one_of: + - 'patch_config.0.apt.0.type' + - 'patch_config.0.apt.0.excludes' + - 'patch_config.0.apt.0.exclusive_packages' + enum_values: + - 'DIST' + - 'UPGRADE' + - name: 'excludes' + type: Array + description: | + List of packages to exclude from update. These packages will be excluded. + at_least_one_of: + - 'patch_config.0.apt.0.type' + - 'patch_config.0.apt.0.excludes' + - 'patch_config.0.apt.0.exclusive_packages' + item_type: + type: String + - name: 'exclusivePackages' + type: Array + description: | + An exclusive list of packages to be updated. These are the only packages that will be updated. + If these packages are not installed, they will be ignored. This field cannot be specified with + any other patch configuration fields. + at_least_one_of: + - 'patch_config.0.apt.0.type' + - 'patch_config.0.apt.0.excludes' + - 'patch_config.0.apt.0.exclusive_packages' + item_type: + type: String + - name: 'yum' + type: NestedObject + description: | + Yum update settings. Use this setting to override the default yum patch rules. + at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + properties: + - name: 'security' + type: Boolean + description: | + Adds the --security flag to yum update. Not supported on all platforms. 
+ at_least_one_of: + - 'patch_config.0.yum.0.security' + - 'patch_config.0.yum.0.minimal' + - 'patch_config.0.yum.0.excludes' + - 'patch_config.0.yum.0.exclusive_packages' + - name: 'minimal' + type: Boolean + description: | + Will cause patch to run yum update-minimal instead. + at_least_one_of: + - 'patch_config.0.yum.0.security' + - 'patch_config.0.yum.0.minimal' + - 'patch_config.0.yum.0.excludes' + - 'patch_config.0.yum.0.exclusive_packages' + - name: 'excludes' + type: Array + description: | + List of packages to exclude from update. These packages will be excluded. + at_least_one_of: + - 'patch_config.0.yum.0.security' + - 'patch_config.0.yum.0.minimal' + - 'patch_config.0.yum.0.excludes' + - 'patch_config.0.yum.0.exclusive_packages' + item_type: + type: String + - name: 'exclusivePackages' + type: Array + description: | + An exclusive list of packages to be updated. These are the only packages that will be updated. + If these packages are not installed, they will be ignored. This field cannot be specified with + any other patch configuration fields. + at_least_one_of: + - 'patch_config.0.yum.0.security' + - 'patch_config.0.yum.0.minimal' + - 'patch_config.0.yum.0.excludes' + - 'patch_config.0.yum.0.exclusive_packages' + item_type: + type: String + - name: 'goo' + type: NestedObject + description: | + goo update settings. Use this setting to override the default goo patch rules. + at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + properties: + - name: 'enabled' + type: Boolean + description: | + goo update settings. Use this setting to override the default goo patch rules. + required: true + - name: 'zypper' + type: NestedObject + description: | + zypper update settings. Use this setting to override the default zypper patch rules. 
+ at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + properties: + - name: 'withOptional' + type: Boolean + description: | + Adds the --with-optional flag to zypper patch. + at_least_one_of: + - 'patch_config.0.zypper.0.withOptional' + - 'patch_config.0.zypper.0.withUpdate' + - 'patch_config.0.zypper.0.categories' + - 'patch_config.0.zypper.0.severities' + - 'patch_config.0.zypper.0.excludes' + - 'patch_config.0.zypper.0.exclusive_patches' + - name: 'withUpdate' + type: Boolean + description: | + Adds the --with-update flag, to zypper patch. + at_least_one_of: + - 'patch_config.0.zypper.0.withOptional' + - 'patch_config.0.zypper.0.withUpdate' + - 'patch_config.0.zypper.0.categories' + - 'patch_config.0.zypper.0.severities' + - 'patch_config.0.zypper.0.excludes' + - 'patch_config.0.zypper.0.exclusive_patches' + - name: 'categories' + type: Array + description: | + Install only patches with these categories. Common categories include security, recommended, and feature. + at_least_one_of: + - 'patch_config.0.zypper.0.withOptional' + - 'patch_config.0.zypper.0.withUpdate' + - 'patch_config.0.zypper.0.categories' + - 'patch_config.0.zypper.0.severities' + - 'patch_config.0.zypper.0.excludes' + - 'patch_config.0.zypper.0.exclusive_patches' + item_type: + type: String + - name: 'severities' + type: Array + description: | + Install only patches with these severities. Common severities include critical, important, moderate, and low. 
+ at_least_one_of: + - 'patch_config.0.zypper.0.withOptional' + - 'patch_config.0.zypper.0.withUpdate' + - 'patch_config.0.zypper.0.categories' + - 'patch_config.0.zypper.0.severities' + - 'patch_config.0.zypper.0.excludes' + - 'patch_config.0.zypper.0.exclusive_patches' + item_type: + type: String + - name: 'excludes' + type: Array + description: | + List of packages to exclude from update. + at_least_one_of: + - 'patch_config.0.zypper.0.withOptional' + - 'patch_config.0.zypper.0.withUpdate' + - 'patch_config.0.zypper.0.categories' + - 'patch_config.0.zypper.0.severities' + - 'patch_config.0.zypper.0.excludes' + - 'patch_config.0.zypper.0.exclusive_patches' + item_type: + type: String + - name: 'exclusivePatches' + type: Array + description: | + An exclusive list of patches to be updated. These are the only patches that will be installed using 'zypper patch patch:' command. + This field must not be used with any other patch configuration fields. + at_least_one_of: + - 'patch_config.0.zypper.0.withOptional' + - 'patch_config.0.zypper.0.withUpdate' + - 'patch_config.0.zypper.0.categories' + - 'patch_config.0.zypper.0.severities' + - 'patch_config.0.zypper.0.excludes' + - 'patch_config.0.zypper.0.exclusive_patches' + item_type: + type: String + - name: 'windowsUpdate' + type: NestedObject + description: | + Windows update settings. Use this setting to override the default Windows patch rules. + at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + properties: + - name: 'classifications' + type: Array + description: | + Only apply updates of these windows update classifications. If empty, all updates are applied. 
+ conflicts: + - patch_config.0.windows_update.0.exclusive_patches + at_least_one_of: + - 'patch_config.0.windows_update.0.classifications' + - 'patch_config.0.windows_update.0.excludes' + - 'patch_config.0.windows_update.0.exclusive_patches' + item_type: + type: Enum + description: 'What type of updates should we apply?' + enum_values: + - 'CRITICAL' + - 'SECURITY' + - 'DEFINITION' + - 'DRIVER' + - 'FEATURE_PACK' + - 'SERVICE_PACK' + - 'TOOL' + - 'UPDATE_ROLLUP' + - 'UPDATE' + - name: 'excludes' + type: Array + description: | + List of KBs to exclude from update. + conflicts: + - patch_config.0.windows_update.0.exclusive_patches + at_least_one_of: + - 'patch_config.0.windows_update.0.classifications' + - 'patch_config.0.windows_update.0.excludes' + - 'patch_config.0.windows_update.0.exclusive_patches' + item_type: + type: String + - name: 'exclusivePatches' + type: Array + description: | + An exclusive list of kbs to be updated. These are the only patches that will be updated. + This field must not be used with other patch configurations. + conflicts: + - patch_config.0.windows_update.0.classifications + - patch_config.0.windows_update.0.excludes + at_least_one_of: + - 'patch_config.0.windows_update.0.classifications' + - 'patch_config.0.windows_update.0.excludes' + - 'patch_config.0.windows_update.0.exclusive_patches' + item_type: + type: String + - name: 'preStep' + type: NestedObject + description: | + The ExecStep to run before the patch update. + at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + properties: + - name: 'linuxExecStepConfig' + type: NestedObject + description: | + The ExecStepConfig for all Linux VMs targeted by the PatchJob. 
+ at_least_one_of: + - 'patch_config.0.pre_step.0.linux_exec_step_config' + - 'patch_config.0.pre_step.0.windows_exec_step_config' + properties: + - name: 'allowedSuccessCodes' + type: Array + description: | + Defaults to [0]. A list of possible return values that the execution can return to indicate a success. + item_type: + type: Integer + - name: 'interpreter' + type: Enum + description: | + The script interpreter to use to run the script. If no interpreter is specified the script will + be executed directly, which will likely only succeed for scripts with shebang lines. + enum_values: + - 'SHELL' + - 'POWERSHELL' + - name: 'localPath' + type: String + description: | + An absolute path to the executable on the VM. + exactly_one_of: + - 'patch_config.0.pre_step.0.linux_exec_step_config.0.local_path' + - 'patch_config.0.pre_step.0.linux_exec_step_config.0.gcs_object' + - name: 'gcsObject' + type: NestedObject + description: | + A Cloud Storage object containing the executable. + exactly_one_of: + - 'patch_config.0.pre_step.0.linux_exec_step_config.0.local_path' + - 'patch_config.0.pre_step.0.linux_exec_step_config.0.gcs_object' + properties: + - name: 'bucket' + type: String + description: | + Bucket of the Cloud Storage object. + required: true + - name: 'object' + type: String + description: | + Name of the Cloud Storage object. + required: true + - name: 'generationNumber' + type: String + description: | + Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. + required: true + - name: 'windowsExecStepConfig' + type: NestedObject + description: | + The ExecStepConfig for all Windows VMs targeted by the PatchJob. + at_least_one_of: + - 'patch_config.0.pre_step.0.linux_exec_step_config' + - 'patch_config.0.pre_step.0.windows_exec_step_config' + properties: + - name: 'allowedSuccessCodes' + type: Array + description: | + Defaults to [0]. 
A list of possible return values that the execution can return to indicate a success. + item_type: + type: Integer + - name: 'interpreter' + type: Enum + description: | + The script interpreter to use to run the script. If no interpreter is specified the script will + be executed directly, which will likely only succeed for scripts with shebang lines. + enum_values: + - 'SHELL' + - 'POWERSHELL' + - name: 'localPath' + type: String + description: | + An absolute path to the executable on the VM. + exactly_one_of: + - 'patch_config.0.pre_step.0.windows_exec_step_config.0.local_path' + - 'patch_config.0.pre_step.0.windows_exec_step_config.0.gcs_object' + - name: 'gcsObject' + type: NestedObject + description: | + A Cloud Storage object containing the executable. + exactly_one_of: + - 'patch_config.0.pre_step.0.windows_exec_step_config.0.local_path' + - 'patch_config.0.pre_step.0.windows_exec_step_config.0.gcs_object' + properties: + - name: 'bucket' + type: String + description: | + Bucket of the Cloud Storage object. + required: true + - name: 'object' + type: String + description: | + Name of the Cloud Storage object. + required: true + - name: 'generationNumber' + type: String + description: | + Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. + required: true + - name: 'postStep' + type: NestedObject + description: | + The ExecStep to run after the patch update. + at_least_one_of: + - 'patch_config.0.reboot_config' + - 'patch_config.0.apt' + - 'patch_config.0.yum' + - 'patch_config.0.goo' + - 'patch_config.0.zypper' + - 'patch_config.0.windows_update' + - 'patch_config.0.pre_step' + - 'patch_config.0.post_step' + properties: + - name: 'linuxExecStepConfig' + type: NestedObject + description: | + The ExecStepConfig for all Linux VMs targeted by the PatchJob. 
+ at_least_one_of: + - 'patch_config.0.post_step.0.linux_exec_step_config' + - 'patch_config.0.post_step.0.windows_exec_step_config' + properties: + - name: 'allowedSuccessCodes' + type: Array + description: | + Defaults to [0]. A list of possible return values that the execution can return to indicate a success. + item_type: + type: Integer + - name: 'interpreter' + type: Enum + description: | + The script interpreter to use to run the script. If no interpreter is specified the script will + be executed directly, which will likely only succeed for scripts with shebang lines. + enum_values: + - 'SHELL' + - 'POWERSHELL' + - name: 'localPath' + type: String + description: | + An absolute path to the executable on the VM. + exactly_one_of: + - 'patch_config.0.post_step.0.linux_exec_step_config.0.local_path' + - 'patch_config.0.post_step.0.linux_exec_step_config.0.gcs_object' + - name: 'gcsObject' + type: NestedObject + description: | + A Cloud Storage object containing the executable. + exactly_one_of: + - 'patch_config.0.post_step.0.linux_exec_step_config.0.local_path' + - 'patch_config.0.post_step.0.linux_exec_step_config.0.gcs_object' + properties: + - name: 'bucket' + type: String + description: | + Bucket of the Cloud Storage object. + required: true + - name: 'object' + type: String + description: | + Name of the Cloud Storage object. + required: true + - name: 'generationNumber' + type: String + description: | + Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. + required: true + - name: 'windowsExecStepConfig' + type: NestedObject + description: | + The ExecStepConfig for all Windows VMs targeted by the PatchJob. + at_least_one_of: + - 'patch_config.0.post_step.0.linux_exec_step_config' + - 'patch_config.0.post_step.0.windows_exec_step_config' + properties: + - name: 'allowedSuccessCodes' + type: Array + description: | + Defaults to [0]. 
A list of possible return values that the execution can return to indicate a success. + item_type: + type: Integer + - name: 'interpreter' + type: Enum + description: | + The script interpreter to use to run the script. If no interpreter is specified the script will + be executed directly, which will likely only succeed for scripts with shebang lines. + enum_values: + - 'SHELL' + - 'POWERSHELL' + - name: 'localPath' + type: String + description: | + An absolute path to the executable on the VM. + exactly_one_of: + - 'patch_config.0.post_step.0.windows_exec_step_config.0.local_path' + - 'patch_config.0.post_step.0.windows_exec_step_config.0.gcs_object' + - name: 'gcsObject' + type: NestedObject + description: | + A Cloud Storage object containing the executable. + exactly_one_of: + - 'patch_config.0.post_step.0.windows_exec_step_config.0.local_path' + - 'patch_config.0.post_step.0.windows_exec_step_config.0.gcs_object' + properties: + - name: 'bucket' + type: String + description: | + Bucket of the Cloud Storage object. + required: true + - name: 'object' + type: String + description: | + Name of the Cloud Storage object. + required: true + - name: 'generationNumber' + type: String + description: | + Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change. + required: true + - name: 'duration' + type: String + description: | + Duration of the patch. After the duration ends, the patch times out. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s" + - name: 'createTime' + type: String + description: | + Time the patch deployment was created. Timestamp is in RFC3339 text format. + A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: String + description: | + Time the patch deployment was last updated. Timestamp is in RFC3339 text format. 
+ A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'lastExecuteTime' + type: String + description: | + The last time a patch job was started by this deployment. Timestamp is in RFC3339 text format. + A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'oneTimeSchedule' + type: NestedObject + description: | + Schedule a one-time execution. + exactly_one_of: + - 'one_time_schedule' + - 'recurring_schedule' + properties: + - name: 'executeTime' + type: String + description: | + The desired patch job execution time. A timestamp in RFC3339 UTC "Zulu" format, + accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". + required: true + - name: 'recurringSchedule' + type: NestedObject + description: | + Schedule recurring executions. + exactly_one_of: + - 'one_time_schedule' + - 'recurring_schedule' + properties: + - name: 'timeZone' + type: NestedObject + description: | + Defines the time zone that timeOfDay is relative to. The rules for daylight saving time are + determined by the chosen time zone. + required: true + properties: + - name: 'id' + type: String + description: | + IANA Time Zone Database time zone, e.g. "America/New_York". + required: true + - name: 'version' + type: String + description: | + IANA Time Zone Database version number, e.g. "2019a". + - name: 'startTime' + type: String + description: | + The time that the recurring schedule becomes effective. Defaults to createTime of the patch deployment. + A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". + - name: 'endTime' + type: String + description: | + The end time at which a recurring patch deployment schedule is no longer active. + A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". 
+ - name: 'timeOfDay' + type: NestedObject + description: | + Time of the day to run a recurring deployment. + required: true + send_empty_value: true + custom_flatten: 'templates/terraform/custom_flatten/go/os_config_patch_deployment_recurring_schedule_time_of_day.go.tmpl' + properties: + - name: 'hours' + type: Integer + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + at_least_one_of: + - 'recurring_schedule.0.time_of_day.0.hours' + - 'recurring_schedule.0.time_of_day.0.minutes' + - 'recurring_schedule.0.time_of_day.0.seconds' + - 'recurring_schedule.0.time_of_day.0.nanos' + validation: + function: 'validation.IntBetween(0,23)' + - name: 'minutes' + type: Integer + description: | + Minutes of hour of day. Must be from 0 to 59. + at_least_one_of: + - 'recurring_schedule.0.time_of_day.0.hours' + - 'recurring_schedule.0.time_of_day.0.minutes' + - 'recurring_schedule.0.time_of_day.0.seconds' + - 'recurring_schedule.0.time_of_day.0.nanos' + validation: + function: 'validation.IntBetween(0,59)' + - name: 'seconds' + type: Integer + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + at_least_one_of: + - 'recurring_schedule.0.time_of_day.0.hours' + - 'recurring_schedule.0.time_of_day.0.minutes' + - 'recurring_schedule.0.time_of_day.0.seconds' + - 'recurring_schedule.0.time_of_day.0.nanos' + validation: + function: 'validation.IntBetween(0,60)' + - name: 'nanos' + type: Integer + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. 
+ at_least_one_of: + - 'recurring_schedule.0.time_of_day.0.hours' + - 'recurring_schedule.0.time_of_day.0.minutes' + - 'recurring_schedule.0.time_of_day.0.seconds' + - 'recurring_schedule.0.time_of_day.0.nanos' + validation: + function: 'validation.IntBetween(0,999999999)' + - name: 'lastExecuteTime' + type: String + description: | + The time the last patch job ran successfully. + A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'nextExecuteTime' + type: String + description: | + The time the next patch job is scheduled to run. + A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'weekly' + type: NestedObject + description: | + Schedule with weekly executions. + properties: + - name: 'dayOfWeek' + type: Enum + description: | + IANA Time Zone Database time zone, e.g. "America/New_York". + required: true + enum_values: + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + - name: 'monthly' + type: NestedObject + description: | + Schedule with monthly executions. + properties: + - name: 'weekDayOfMonth' + type: NestedObject + description: | + Week day in a month. + exactly_one_of: + - 'recurring_schedule.0.monthly.0.week_day_of_month' + - 'recurring_schedule.0.monthly.0.month_day' + properties: + - name: 'weekOrdinal' + type: Integer + description: | + Week number in a month. 1-4 indicates the 1st to 4th week of the month. -1 indicates the last week of the month. + required: true + validation: + function: 'validation.IntBetween(-1,4)' + - name: 'dayOfWeek' + type: Enum + description: | + A day of the week. 
+ required: true + enum_values: + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + - name: 'dayOffset' + type: Integer + description: | + Represents the number of days before or after the given week day of month that the patch deployment is scheduled for. + validation: + function: 'validation.IntBetween(-30,30)' + - name: 'monthDay' + type: Integer + description: | + One day of the month. 1-31 indicates the 1st to the 31st day. -1 indicates the last day of the month. + Months without the target day will be skipped. For example, a schedule to run "every month on the 31st" + will not run in February, April, June, etc. + exactly_one_of: + - 'recurring_schedule.0.monthly.0.week_day_of_month' + - 'recurring_schedule.0.monthly.0.month_day' + validation: + function: 'validation.IntBetween(-1,31)' + - name: 'rollout' + type: NestedObject + description: | + Rollout strategy of the patch job. + properties: + - name: 'mode' + type: Enum + description: | + Mode of the patch rollout. + required: true + enum_values: + - 'ZONE_BY_ZONE' + - 'CONCURRENT_ZONES' + - name: 'disruptionBudget' + type: NestedObject + description: | + The maximum number (or percentage) of VMs per zone to disrupt at any given moment. The number of VMs calculated from multiplying the percentage by the total number of VMs in a zone is rounded up. + During patching, a VM is considered disrupted from the time the agent is notified to begin until patching has completed. This disruption time includes the time to complete reboot and any post-patch steps. + A VM contributes to the disruption budget if its patching operation fails either when applying the patches, running pre or post patch steps, or if it fails to respond with a success notification before timing out. VMs that are not running or do not have an active agent do not count toward this disruption budget. 
+ For zone-by-zone rollouts, if the disruption budget in a zone is exceeded, the patch job stops, because continuing to the next zone requires completion of the patch process in the previous zone. + For example, if the disruption budget has a fixed value of 10, and 8 VMs fail to patch in the current zone, the patch job continues to patch 2 VMs at a time until the zone is completed. When that zone is completed successfully, patching begins with 10 VMs at a time in the next zone. If 10 VMs in the next zone fail to patch, the patch job stops. + required: true + properties: + - name: 'fixed' + type: Integer + description: | + Specifies a fixed value. + exactly_one_of: + - 'rollout.0.disruption_budget.0.fixed' + - 'rollout.0.disruption_budget.0.percentage' + validation: + function: 'validation.IntAtLeast(1)' + - name: 'percentage' + type: Integer + description: | + Specifies the relative value defined as a percentage, which will be multiplied by a reference value. + api_name: percent + exactly_one_of: + - 'rollout.0.disruption_budget.0.fixed' + - 'rollout.0.disruption_budget.0.percentage' + validation: + function: 'validation.IntBetween(0,100)' diff --git a/mmv1/products/osconfig/go_product.yaml b/mmv1/products/osconfig/go_product.yaml new file mode 100644 index 000000000000..0cb5f83874bd --- /dev/null +++ b/mmv1/products/osconfig/go_product.yaml @@ -0,0 +1,25 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OSConfig' +display_name: 'OS Config' +versions: + - name: 'ga' + base_url: 'https://osconfig.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://osconfig.googleapis.com/v1beta/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' + - 'https://www.googleapis.com/auth/compute' diff --git a/mmv1/products/oslogin/go_SSHPublicKey.yaml b/mmv1/products/oslogin/go_SSHPublicKey.yaml new file mode 100644 index 000000000000..241d8afb5318 --- /dev/null +++ b/mmv1/products/oslogin/go_SSHPublicKey.yaml @@ -0,0 +1,73 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SSHPublicKey' +kind: 'user#sshPublicKeys' +description: | + The SSH public key information associated with a Google account. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/oslogin' + api: 'https://cloud.google.com/compute/docs/oslogin/rest/v1/users.sshPublicKeys' +docs: +id_format: 'users/{{user}}/sshPublicKeys/{{fingerprint}}' +base_url: 'users/{{user}}/sshPublicKeys/{{fingerprint}}' +create_url: 'users/{{user}}:importSshPublicKey' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'users/{{user}}/sshPublicKeys/{{fingerprint}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + pre_create: 'templates/terraform/pre_create/go/os_login_ssh_public_key.go.tmpl' + post_create: 'templates/terraform/post_create/go/sshkeyfingerprint.go.tmpl' +skip_sweeper: true +examples: + - name: 'os_login_ssh_key_basic' + primary_resource_id: 'cache' +parameters: + - name: 'user' + type: String + description: | + The user email. + url_param_only: true + required: true + immutable: true + - name: 'project' + type: String + description: | + The project ID of the Google Cloud Platform project. + url_param_only: true + immutable: true +properties: + - name: 'key' + type: String + description: | + Public key text in SSH format, defined by RFC4253 section 6.6. + required: true + immutable: true + - name: 'expirationTimeUsec' + type: String + description: | + An expiration time in microseconds since epoch. + required: false + - name: 'fingerprint' + type: String + description: | + The SHA-256 fingerprint of the SSH public key. + output: true diff --git a/mmv1/products/oslogin/go_product.yaml b/mmv1/products/oslogin/go_product.yaml new file mode 100644 index 000000000000..624498377b60 --- /dev/null +++ b/mmv1/products/oslogin/go_product.yaml @@ -0,0 +1,23 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OSLogin' +display_name: 'OS Login' +versions: + - name: 'ga' + base_url: 'https://oslogin.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' + - 'https://www.googleapis.com/auth/compute' diff --git a/mmv1/products/parallelstore/Instance.yaml b/mmv1/products/parallelstore/Instance.yaml index 006d611f2d9c..881bb5b5790a 100644 --- a/mmv1/products/parallelstore/Instance.yaml +++ b/mmv1/products/parallelstore/Instance.yaml @@ -23,6 +23,24 @@ description: A Parallelstore Instance. update_verb: :PATCH update_mask: true autogen_async: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: name + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: + result: !ruby/object:Api::OpAsync::Result + path: response + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: done + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: error + message: message examples: - !ruby/object:Provider::Terraform::Examples name: 'parallelstore_instance_basic' @@ -32,126 +50,145 @@ examples: name: 'instance' network_name: 'network' address_name: 'address' +parameters: + - !ruby/object:Api::Type::String + name: location + description: | + Part of `parent`. See documentation of `projectsId`. 
+ url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: instanceId + description: | + The logical name of the Parallelstore instance in the user project with the following restrictions: + * Must contain only lowercase letters, numbers, and hyphens. + * Must start with a letter. + * Must be between 1-63 characters. + * Must end with a number or a letter. + * Must be unique within the customer project/ location + url_param_only: true + required: true + immutable: true properties: - !ruby/object:Api::Type::String name: name - description: "Identifier. The resource name of the instance, in the format\n`projects/{project}/locations/{location}/instances/{instance_id}` " + description: | + Identifier. The resource name of the instance, in the format + `projects/{project}/locations/{location}/instances/{instance_id}` output: true - !ruby/object:Api::Type::String name: description - description: 'The description of the instance. 2048 characters or less. ' + description: | + The description of the instance. 2048 characters or less. - !ruby/object:Api::Type::String name: state - description: "The instance state. \n Possible values:\n STATE_UNSPECIFIED\nCREATING\nACTIVE\nDELETING\nFAILED\nUPGRADING" + description: | + The instance state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + FAILED + UPGRADING output: true - !ruby/object:Api::Type::String name: createTime - description: 'The time when the instance was created. ' + description: | + The time when the instance was created. output: true - !ruby/object:Api::Type::String name: updateTime - description: 'The time when the instance was updated. ' + description: | + The time when the instance was updated. 
output: true - !ruby/object:Api::Type::KeyValueLabels name: labels output: api_name: - description: "Cloud Labels are a flexible and lightweight mechanism for - organizing cloud\nresources into groups that reflect a customer's organizational - needs and\ndeployment strategies. Cloud Labels can be used to filter collections - of\nresources. They can be used to control how resource metrics are aggregated.\nAnd - they can be used as arguments to policy management rules (e.g. route,\nfirewall, - load balancing, etc.).\n\n * Label keys must be between 1 and 63 characters long - and must conform to\n the following regular expression: `a-z{0,62}`.\n * Label - values must be between 0 and 63 characters long and must conform\n to the regular - expression `[a-z0-9_-]{0,63}`.\n * No more than 64 labels can be associated with - a given resource.\n\nSee https://goo.gl/xmQnxf for more information on and examples - of labels.\n\nIf you plan to use labels in your own code, please note that additional\ncharacters - may be allowed in the future. Therefore, you are advised to use\nan internal label - representation, such as JSON, which doesn't rely upon\nspecific characters being - disallowed. For example, representing labels\nas the string: name + \"_\" + - value would prove problematic if we were to\nallow \"_\" in a future release. " + description: | + Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). + + * Label keys must be between 1 and 63 characters long and must conform to + the following regular expression: `a-z{0,62}`. 
+ * Label values must be between 0 and 63 characters long and must conform + to the regular expression `[a-z0-9_-]{0,63}`. + * No more than 64 labels can be associated with a given resource. + + See https://goo.gl/xmQnxf for more information on and examples of labels. + + If you plan to use labels in your own code, please note that additional + characters may be allowed in the future. Therefore, you are advised to use + an internal label representation, such as JSON, which doesn't rely upon + specific characters being disallowed. For example, representing labels + as the string: `name + \"_\" + value` would prove problematic if we were to + allow `\"_\"` in a future release. " - !ruby/object:Api::Type::String name: capacityGib - description: 'Required. Immutable. Storage capacity of Parallelstore instance in - Gibibytes (GiB). ' + description: | + Required. Immutable. Storage capacity of Parallelstore instance in Gibibytes (GiB). required: true immutable: true - !ruby/object:Api::Type::String name: daosVersion - description: 'The version of DAOS software running in the instance ' + description: | + The version of DAOS software running in the instance. output: true - !ruby/object:Api::Type::Array name: accessPoints item_type: Api::Type::String - description: "Output only. List of access_points.\nContains a list of IPv4 addresses - used for client side configuration. " + description: | + Output only. List of access_points. + Contains a list of IPv4 addresses used for client side configuration. output: true - !ruby/object:Api::Type::String name: network - description: "Immutable. The name of the Google Compute Engine\n[VPC network](https://cloud.google.com/vpc/docs/vpc) - to which the\ninstance is connected. " + description: | + Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. immutable: true - !ruby/object:Api::Type::String name: reservedIpRange - description: "Immutable. 
Contains the id of the allocated IP address range - associated with the\nprivate service access connection for example, \"test-default\" - associated\nwith IP range 10.0.0.0/29. If no range id is provided all ranges will - be\nconsidered. " + description: | + Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \"test-default\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. immutable: true - !ruby/object:Api::Type::String name: effectiveReservedIpRange - description: "Immutable. Contains the id of the allocated IP address - range associated with the\nprivate service access connection for example, \"test-default\" - associated\nwith IP range 10.0.0.0/29. This field is populated by the service - and\nand contains the value currently used by the service. " + description: | + Immutable. Contains the id of the allocated IP address + range associated with the private service access connection for example, \"test-default\" + associated with IP range 10.0.0.0/29. This field is populated by the service + and contains the value currently used by the service. immutable: true output: true - !ruby/object:Api::Type::String name: fileStripeLevel - description: "Stripe level for files.\nMIN better suited for small size - files.\nMAX higher throughput performance for larger files. \n Possible values:\n - FILE_STRIPE_LEVEL_UNSPECIFIED\nFILE_STRIPE_LEVEL_MIN\nFILE_STRIPE_LEVEL_BALANCED\nFILE_STRIPE_LEVEL_MAX" + description: | + Stripe level for files. + MIN better suited for small size files. + MAX higher throughput performance for larger files. 
+ Possible values: + FILE_STRIPE_LEVEL_UNSPECIFIED + FILE_STRIPE_LEVEL_MIN + FILE_STRIPE_LEVEL_BALANCED + FILE_STRIPE_LEVEL_MAX - !ruby/object:Api::Type::String name: directoryStripeLevel - description: "Stripe level for directories.\nMIN when directory has a - small number of files.\nMAX when directory has a large number of files. \n Possible - values:\n DIRECTORY_STRIPE_LEVEL_UNSPECIFIED\nDIRECTORY_STRIPE_LEVEL_MIN\nDIRECTORY_STRIPE_LEVEL_BALANCED\nDIRECTORY_STRIPE_LEVEL_MAX" -parameters: - - !ruby/object:Api::Type::String - name: location - description: "Part of `parent`. See documentation of `projectsId`." - url_param_only: true - required: true - immutable: true - - !ruby/object:Api::Type::String - name: instanceId description: | - The logical name of the Parallelstore instance in the user project with the following restrictions: - - * Must contain only lowercase letters, numbers, and hyphens. - * Must start with a letter. - * Must be between 1-63 characters. - * Must end with a number or a letter. - * Must be unique within the customer project/ location - url_param_only: true - required: true - immutable: true -async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - path: name - base_url: "{{op_id}}" - wait_ms: 1000 - timeouts: - result: !ruby/object:Api::OpAsync::Result - path: response - resource_inside_response: true - status: !ruby/object:Api::OpAsync::Status - path: done - complete: true - allowed: - - true - - false - error: !ruby/object:Api::OpAsync::Error - path: error - message: message + Stripe level for directories. + MIN when directory has a small number of files. + MAX when directory has a large number of files. 
+ Possible values: + DIRECTORY_STRIPE_LEVEL_UNSPECIFIED + DIRECTORY_STRIPE_LEVEL_MIN + DIRECTORY_STRIPE_LEVEL_BALANCED + DIRECTORY_STRIPE_LEVEL_MAX diff --git a/mmv1/products/parallelstore/go_Instance.yaml b/mmv1/products/parallelstore/go_Instance.yaml new file mode 100644 index 000000000000..d066bb7131d0 --- /dev/null +++ b/mmv1/products/parallelstore/go_Instance.yaml @@ -0,0 +1,211 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Instance' +description: A Parallelstore Instance. 
+min_version: 'beta' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/instances/{{instance_id}}' +base_url: 'projects/{{project}}/locations/{{location}}/instances' +self_link: 'projects/{{project}}/locations/{{location}}/instances/{{instance_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/instances?instanceId={{instance_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/instances/{{instance_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'parallelstore_instance_basic' + primary_resource_id: 'instance' + min_version: 'beta' + vars: + name: 'instance' + network_name: 'network' + address_name: 'address' +parameters: + - name: 'location' + type: String + description: | + Part of `parent`. See documentation of `projectsId`. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'instanceId' + type: String + description: | + The logical name of the Parallelstore instance in the user project with the following restrictions: + * Must contain only lowercase letters, numbers, and hyphens. + * Must start with a letter. + * Must be between 1-63 characters. + * Must end with a number or a letter. + * Must be unique within the customer project/ location + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Identifier. 
The resource name of the instance, in the format + `projects/{project}/locations/{location}/instances/{instance_id}` + min_version: 'beta' + output: true + - name: 'description' + type: String + description: | + The description of the instance. 2048 characters or less. + min_version: 'beta' + - name: 'state' + type: String + description: | + The instance state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + FAILED + UPGRADING + min_version: 'beta' + output: true + - name: 'createTime' + type: String + description: | + The time when the instance was created. + min_version: 'beta' + output: true + - name: 'updateTime' + type: String + description: | + The time when the instance was updated. + min_version: 'beta' + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). + + * Label keys must be between 1 and 63 characters long and must conform to + the following regular expression: `a-z{0,62}`. + * Label values must be between 0 and 63 characters long and must conform + to the regular expression `[a-z0-9_-]{0,63}`. + * No more than 64 labels can be associated with a given resource. + + See https://goo.gl/xmQnxf for more information on and examples of labels. + + If you plan to use labels in your own code, please note that additional + characters may be allowed in the future. Therefore, you are advised to use + an internal label representation, such as JSON, which doesn't rely upon + specific characters being disallowed. 
For example, representing labels + as the string: `name + \"_\" + value` would prove problematic if we were to + allow `\"_\"` in a future release. " + min_version: 'beta' + - name: 'capacityGib' + type: String + description: | + Required. Immutable. Storage capacity of Parallelstore instance in Gibibytes (GiB). + min_version: 'beta' + required: true + immutable: true + - name: 'daosVersion' + type: String + description: | + The version of DAOS software running in the instance. + min_version: 'beta' + output: true + - name: 'accessPoints' + type: Array + description: | + Output only. List of access_points. + Contains a list of IPv4 addresses used for client side configuration. + min_version: 'beta' + output: true + item_type: + type: String + - name: 'network' + type: String + description: | + Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. + min_version: 'beta' + immutable: true + - name: 'reservedIpRange' + type: String + description: | + Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \"test-default\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. + min_version: 'beta' + immutable: true + - name: 'effectiveReservedIpRange' + type: String + description: | + Immutable. Contains the id of the allocated IP address + range associated with the private service access connection for example, \"test-default\" + associated with IP range 10.0.0.0/29. This field is populated by the service + and contains the value currently used by the service. + min_version: 'beta' + immutable: true + output: true + - name: 'fileStripeLevel' + type: String + description: | + Stripe level for files. + MIN better suited for small size files. + MAX higher throughput performance for larger files. 
+ Possible values: + FILE_STRIPE_LEVEL_UNSPECIFIED + FILE_STRIPE_LEVEL_MIN + FILE_STRIPE_LEVEL_BALANCED + FILE_STRIPE_LEVEL_MAX + min_version: 'beta' + - name: 'directoryStripeLevel' + type: String + description: | + Stripe level for directories. + MIN when directory has a small number of files. + MAX when directory has a large number of files. + Possible values: + DIRECTORY_STRIPE_LEVEL_UNSPECIFIED + DIRECTORY_STRIPE_LEVEL_MIN + DIRECTORY_STRIPE_LEVEL_BALANCED + DIRECTORY_STRIPE_LEVEL_MAX + min_version: 'beta' diff --git a/mmv1/products/parallelstore/go_product.yaml b/mmv1/products/parallelstore/go_product.yaml new file mode 100644 index 000000000000..53b01851e93e --- /dev/null +++ b/mmv1/products/parallelstore/go_product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Parallelstore' +display_name: 'Parallelstore' +versions: + - name: 'beta' + base_url: 'https://parallelstore.googleapis.com/v1beta/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/privilegedaccessmanager/Entitlement.yaml b/mmv1/products/privilegedaccessmanager/Entitlement.yaml index 5d8e72387418..3a659cb238d6 100644 --- a/mmv1/products/privilegedaccessmanager/Entitlement.yaml +++ b/mmv1/products/privilegedaccessmanager/Entitlement.yaml @@ -24,6 +24,9 @@ description: | update_verb: :PATCH update_mask: true autogen_async: true +custom_code: !ruby/object:Provider::Terraform::CustomCode + pre_update: templates/terraform/pre_update/privileged_access_manager_entitlement.go.erb + constants: templates/terraform/constants/privileged_access_manager_entitlement.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: "privileged_access_manager_entitlement_basic" @@ -32,6 +35,32 @@ examples: entitlement_id: "example-entitlement" test_env_vars: project: :PROJECT_NAME +parameters: + - !ruby/object:Api::Type::String + name: location + description: | + The region of the Entitlement resource. + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: entitlementId + description: | + The ID to use for this Entitlement. This will become the last part of the resource name. + This value should be 4-63 characters, and valid characters are "[a-z]", "[0-9]", and "-". The first character should be from [a-z]. + This value should be unique among all other Entitlements under the specified `parent`. 
+ url_param_only: true + required: true + immutable: true + validation: !ruby/object:Provider::Terraform::Validation + function: validateEntitlementId + - !ruby/object:Api::Type::String + name: parent + immutable: true + required: true + url_param_only: true + description: | + Format: projects/{project-id|project-number} or organizations/{organization-number} or folders/{folder-number} properties: - !ruby/object:Api::Type::String name: name @@ -231,32 +260,3 @@ properties: description: | Optional. Additional email address to be notified about an eligible entitlement. allow_empty_object: true -parameters: - - !ruby/object:Api::Type::String - name: location - description: | - The region of the Entitlement resource. - url_param_only: true - required: true - immutable: true - - !ruby/object:Api::Type::String - name: entitlementId - description: | - The ID to use for this Entitlement. This will become the last part of the resource name. - This value should be 4-63 characters, and valid characters are "[a-z]", "[0-9]", and "-". The first character should be from [a-z]. - This value should be unique among all other Entitlements under the specified `parent`. 
- url_param_only: true - required: true - immutable: true - validation: !ruby/object:Provider::Terraform::Validation - function: validateEntitlementId - - !ruby/object:Api::Type::String - name: parent - immutable: true - required: true - url_param_only: true - description: | - Format: projects/{project-id|project-number} or organizations/{organization-number} or folders/{folder-number} -custom_code: !ruby/object:Provider::Terraform::CustomCode - pre_update: templates/terraform/pre_update/privileged_access_manager_entitlement.go.erb - constants: templates/terraform/constants/privileged_access_manager_entitlement.go.erb diff --git a/mmv1/products/privilegedaccessmanager/go_Entitlement.yaml b/mmv1/products/privilegedaccessmanager/go_Entitlement.yaml new file mode 100644 index 000000000000..4e0b1434f4b8 --- /dev/null +++ b/mmv1/products/privilegedaccessmanager/go_Entitlement.yaml @@ -0,0 +1,292 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Entitlement' +description: | + An Entitlement defines the eligibility of a set of users to obtain a predefined access for some time possibly after going through an approval workflow. 
+docs: +id_format: '{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}' +base_url: '{{parent}}/locations/{{location}}/entitlements' +self_link: '{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}' +create_url: '{{parent}}/locations/{{location}}/entitlements?entitlementId={{entitlement_id}}' +update_verb: 'PATCH' +update_mask: true +delete_url: '{{parent}}/locations/{{location}}/entitlements/{{entitlement_id}}?force=true' +import_format: + - '{{%parent}}/locations/{{location}}/entitlements/{{entitlement_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + constants: 'templates/terraform/constants/go/privileged_access_manager_entitlement.go.tmpl' + pre_update: 'templates/terraform/pre_update/go/privileged_access_manager_entitlement.go.tmpl' +examples: + - name: 'privileged_access_manager_entitlement_basic' + primary_resource_id: 'tfentitlement' + vars: + entitlement_id: 'example-entitlement' + test_env_vars: + project: 'PROJECT_NAME' +parameters: + - name: 'location' + type: String + description: | + The region of the Entitlement resource. + url_param_only: true + required: true + immutable: true + - name: 'entitlementId' + type: String + description: | + The ID to use for this Entitlement. This will become the last part of the resource name. + This value should be 4-63 characters, and valid characters are "[a-z]", "[0-9]", and "-". The first character should be from [a-z]. + This value should be unique among all other Entitlements under the specified `parent`. 
+ url_param_only: true + required: true + immutable: true + validation: + function: 'validateEntitlementId' + - name: 'parent' + type: String + description: | + Format: projects/{project-id|project-number} or organizations/{organization-number} or folders/{folder-number} + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Output Only. The entitlement's name follows a hierarchical structure, comprising the organization, folder, or project, alongside the region and a unique entitlement ID. + Formats: organizations/{organization-number}/locations/{region}/entitlements/{entitlement-id}, folders/{folder-number}/locations/{region}/entitlements/{entitlement-id}, and projects/{project-id|project-number}/locations/{region}/entitlements/{entitlement-id}. + output: true + - name: 'createTime' + type: String + description: | + Output only. Create time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z" + output: true + - name: 'updateTime' + type: String + description: | + Output only. Update time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'eligibleUsers' + type: Array + description: | + Who can create Grants using Entitlement. This list should contain at most one entry + required: true + item_type: + type: NestedObject + properties: + - name: 'principals' + type: Array + description: | + Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. 
Format for these is documented at "https://cloud.google.com/iam/docs/principal-identifiers#v1" + is_set: true + required: true + item_type: + type: String + item_validation: + function: 'validateDeletedPrincipals' + - name: 'approvalWorkflow' + type: NestedObject + description: | + The approvals needed before access will be granted to a requester. + No approvals will be needed if this field is null. Different types of approval workflows that can be used to gate privileged access granting. + immutable: true + properties: + - name: 'manualApprovals' + type: NestedObject + description: | + A manual approval workflow where users who are designated as approvers need to call the ApproveGrant/DenyGrant APIs for a Grant. + The workflow can consist of multiple serial steps where each step defines who can act as Approver in that step and how many of those users should approve before the workflow moves to the next step. + This can be used to create approval workflows such as + * Require an approval from any user in a group G. + * Require an approval from any k number of users from a Group G. + * Require an approval from any user in a group G and then from a user U. etc. + A single user might be part of `approvers` ACL for multiple steps in this workflow but they can only approve once and that approval will only be considered to satisfy the approval step at which it was granted. + required: true + properties: + - name: 'requireApproverJustification' + type: Boolean + description: | + Optional. Do the approvers need to provide a justification for their actions? + - name: 'steps' + type: Array + description: | + List of approval steps in this workflow. These steps would be followed in the specified order sequentially. 1 step is supported for now. + required: true + item_type: + type: NestedObject + properties: + - name: 'approvers' + type: Array + description: | + The potential set of approvers in this step. This list should contain only one entry.
+ required: true + item_type: + type: NestedObject + properties: + - name: 'principals' + type: Array + description: | + Users who are being allowed for the operation. Each entry should be a valid v1 IAM Principal Identifier. Format for these is documented at: https://cloud.google.com/iam/docs/principal-identifiers#v1 + is_set: true + required: true + item_type: + type: String + min_size: 1 + item_validation: + function: 'validateDeletedPrincipals' + min_size: 1 + max_size: 1 + - name: 'approvalsNeeded' + type: Integer + description: | + How many users from the above list need to approve. + If there are not enough distinct users in the list above then the workflow + will indefinitely block. Should always be greater than 0. Currently 1 is the only + supported value. + - name: 'approverEmailRecipients' + type: Array + description: | + Optional. Additional email addresses to be notified when a grant is pending approval. + is_set: true + item_type: + type: String + - name: 'privilegedAccess' + type: NestedObject + description: | + Privileged access that this service can be used to gate. + required: true + properties: + - name: 'gcpIamAccess' + type: NestedObject + description: | + GcpIamAccess represents IAM based access control on a GCP resource. Refer to https://cloud.google.com/iam/docs to understand more about IAM. + required: true + properties: + - name: 'resourceType' + type: String + description: | + The type of this resource. + required: true + - name: 'resource' + type: String + description: | + Name of the resource. + required: true + - name: 'roleBindings' + type: Array + description: | + Role bindings to be created on successful grant. + required: true + item_type: + type: NestedObject + properties: + - name: 'role' + type: String + description: | + IAM role to be granted. https://cloud.google.com/iam/docs/roles-overview. 
+ required: true + - name: 'conditionExpression' + type: String + description: | + The expression field of the IAM condition to be associated with the role. If specified, a user with an active grant for this entitlement would be able to access the resource only if this condition evaluates to true for their request. + https://cloud.google.com/iam/docs/conditions-overview#attributes. + - name: 'maxRequestDuration' + type: String + description: | + The maximum amount of time for which access would be granted for a request. + A requester can choose to ask for access for less than this duration but never more. + Format: calculate the time in seconds and concatenate it with 's' i.e. 2 hours = "7200s", 45 minutes = "2700s" + required: true + - name: 'state' + type: String + description: Output only. The current state of the Entitlement. + output: true + - name: 'etag' + type: Fingerprint + description: | + For Resource freshness validation (https://google.aip.dev/154) + output: true + - name: 'requesterJustificationConfig' + type: NestedObject + description: | + Defines the ways in which a requester should provide the justification while requesting for access. + required: true + send_empty_value: true + allow_empty_object: true + properties: + - name: 'notMandatory' + type: NestedObject + description: | + The justification is not mandatory but can be provided in any of the supported formats. + send_empty_value: true + allow_empty_object: true + conflicts: + - unstructured + properties: + [] + - name: 'unstructured' + type: NestedObject + description: | + The requester has to provide a justification in the form of free flowing text. + send_empty_value: true + allow_empty_object: true + conflicts: + - not_mandatory + properties: + [] + - name: 'additionalNotificationTargets' + type: NestedObject + description: | + AdditionalNotificationTargets includes email addresses to be notified. 
+ send_empty_value: true + allow_empty_object: true + properties: + - name: 'adminEmailRecipients' + type: Array + description: | + Optional. Additional email addresses to be notified when a principal(requester) is granted access. + is_set: true + allow_empty_object: true + item_type: + type: String + - name: 'requesterEmailRecipients' + type: Array + description: | + Optional. Additional email address to be notified about an eligible entitlement. + is_set: true + allow_empty_object: true + item_type: + type: String diff --git a/mmv1/products/privilegedaccessmanager/go_product.yaml b/mmv1/products/privilegedaccessmanager/go_product.yaml new file mode 100644 index 000000000000..572441d32f96 --- /dev/null +++ b/mmv1/products/privilegedaccessmanager/go_product.yaml @@ -0,0 +1,36 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PrivilegedAccessManager' +display_name: 'Privileged Access Manager' +versions: + - name: 'ga' + base_url: 'https://privilegedaccessmanager.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://privilegedaccessmanager.googleapis.com/v1beta/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' +async: + type: "OpAsync" + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' diff --git a/mmv1/products/publicca/go_ExternalAccountKey.yaml b/mmv1/products/publicca/go_ExternalAccountKey.yaml new file mode 100644 index 000000000000..d73efa8425e2 --- /dev/null +++ b/mmv1/products/publicca/go_ExternalAccountKey.yaml @@ -0,0 +1,81 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ExternalAccountKey' +description: | + A representation of an ExternalAccountKey used for external account binding within ACME. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/certificate-manager/docs/public-ca' + 'Request EAB key ID and HMAC': 'https://cloud.google.com/certificate-manager/docs/public-ca-tutorial#request-key-hmac' + 'Request Certificate Using Public CA': 'https://cloud.google.com/certificate-manager/docs/public-ca-tutorial' + api: 'https://cloud.google.com/certificate-manager/docs/reference/public-ca/rest/v1/projects.locations.externalAccountKeys/create' +docs: + warning: | + This resource is create-only and could not be read from the API. + On delete, the resource would be removed from the state. + You must use an EAB secret within 7 days of obtaining it. + The EAB secret is invalidated if you don't use it within 7 days. + The ACME account registered by using an EAB secret has no expiration. +id_format: '{{name}}' +base_url: 'projects/{{project}}/locations/{{location}}/externalAccountKeys' +self_link: 'projects/{{project}}/locations/{{location}}/externalAccountKeys/{{key_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/externalAccountKeys' +skip_read: true +skip_delete: true +immutable: true +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - name + - keyId + - b64MacKey +custom_code: +exclude_tgc: true +examples: + - name: 'public_ca_external_account_key' + primary_resource_id: 'prod' + test_env_vars: + project: 'PROJECT_NAME' + skip_import_test: true +parameters: +properties: + - name: 'name' + type: String + description: | + Resource name. projects/{project}/locations/{location}/externalAccountKeys/{keyId}. + output: true + - name: 'location' + type: String + description: | + Location for the externalAccountKey. Currently only `global` is supported. + url_param_only: true + default_value: "global" + - name: 'keyId' + type: String + description: | + It is generated by the PublicCertificateAuthorityService when the ExternalAccountKey is created. 
+ sensitive: true + output: true + - name: 'b64MacKey' + type: String + description: | + Base64-URL-encoded HS256 key. It is generated by the PublicCertificateAuthorityService + when the ExternalAccountKey is created. + sensitive: true + output: true diff --git a/mmv1/products/publicca/go_product.yaml b/mmv1/products/publicca/go_product.yaml new file mode 100644 index 000000000000..bde6b0c1dbd0 --- /dev/null +++ b/mmv1/products/publicca/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PublicCA' +display_name: 'Public ca' +versions: + - name: 'ga' + base_url: 'https://publicca.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://publicca.googleapis.com/v1beta1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/pubsublite/go_Reservation.yaml b/mmv1/products/pubsublite/go_Reservation.yaml new file mode 100644 index 000000000000..ab02b1190ebc --- /dev/null +++ b/mmv1/products/pubsublite/go_Reservation.yaml @@ -0,0 +1,59 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Reservation' +description: | + A named resource representing a shared pool of capacity. +references: + guides: + 'Managing Reservations': 'https://cloud.google.com/pubsub/lite/docs/reservations' + api: 'https://cloud.google.com/pubsub/lite/docs/reference/rest/v1/admin.projects.locations.reservations' +docs: +base_url: 'projects/{{project}}/locations/{{region}}/reservations' +create_url: 'projects/{{project}}/locations/{{region}}/reservations?reservationId={{name}}' +update_url: 'projects/{{project}}/locations/{{region}}/reservations/{{name}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'pubsub_lite_reservation_basic' + primary_resource_id: 'example' + primary_resource_name: 'fmt.Sprintf("tf-test-example-reservation%s", context["random_suffix"])' + vars: + reservation_name: 'example-reservation' +parameters: + - name: 'region' + type: String + description: The region of the pubsub lite reservation. + url_param_only: true + - name: 'name' + type: String + description: 'Name of the reservation.' + url_param_only: true + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' +properties: + - name: 'throughputCapacity' + type: Integer + description: | + The reserved throughput capacity. Every unit of throughput capacity is + equivalent to 1 MiB/s of published messages or 2 MiB/s of subscribed + messages. 
+ required: true diff --git a/mmv1/products/pubsublite/go_Subscription.yaml b/mmv1/products/pubsublite/go_Subscription.yaml new file mode 100644 index 000000000000..fcfc3d710070 --- /dev/null +++ b/mmv1/products/pubsublite/go_Subscription.yaml @@ -0,0 +1,84 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Subscription' +description: | + A named resource representing the stream of messages from a single, + specific topic, to be delivered to the subscribing application. 
+references: + guides: + 'Managing Subscriptions': 'https://cloud.google.com/pubsub/lite/docs/subscriptions' + api: 'https://cloud.google.com/pubsub/lite/docs/reference/rest/v1/admin.projects.locations.subscriptions' +docs: +base_url: 'projects/{{project}}/locations/{{zone}}/subscriptions' +create_url: 'projects/{{project}}/locations/{{zone}}/subscriptions?subscriptionId={{name}}' +update_url: 'projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + encoder: 'templates/terraform/encoders/go/pubsub_lite.tmpl' +examples: + - name: 'pubsub_lite_subscription_basic' + primary_resource_id: 'example' + vars: + topic_name: 'example-topic' + subscription_name: 'example-subscription' +parameters: + - name: 'region' + type: String + description: The region of the pubsub lite topic. + url_param_only: true + - name: 'zone' + type: String + description: The zone of the pubsub lite topic. + url_param_only: true + - name: 'name' + type: String + description: 'Name of the subscription.' + url_param_only: true + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resource_from_self_link.go.tmpl' +properties: + - name: 'topic' + type: ResourceRef + description: | + A reference to a Topic resource. + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/computed_lite_subscription_topic.tmpl' + resource: 'Topic' + imports: 'name' + - name: 'deliveryConfig' + type: NestedObject + description: | + The settings for this subscription's message delivery. 
+ properties: + - name: 'deliveryRequirement' + type: Enum + description: | + When this subscription should send messages to subscribers relative to messages persistence in storage. + required: true + enum_values: + - 'DELIVER_IMMEDIATELY' + - 'DELIVER_AFTER_STORED' + - 'DELIVERY_REQUIREMENT_UNSPECIFIED' diff --git a/mmv1/products/pubsublite/go_Topic.yaml b/mmv1/products/pubsublite/go_Topic.yaml new file mode 100644 index 000000000000..0d1c97a866a5 --- /dev/null +++ b/mmv1/products/pubsublite/go_Topic.yaml @@ -0,0 +1,117 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Topic' +description: | + A named resource to which messages are sent by publishers. 
+references: + guides: + 'Managing Topics': 'https://cloud.google.com/pubsub/lite/docs/topics' + api: 'https://cloud.google.com/pubsub/lite/docs/reference/rest/v1/admin.projects.locations.topics' +docs: +base_url: 'projects/{{project}}/locations/{{zone}}/topics' +create_url: 'projects/{{project}}/locations/{{zone}}/topics?topicId={{name}}' +update_url: 'projects/{{project}}/locations/{{zone}}/topics/{{name}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + encoder: 'templates/terraform/encoders/go/pubsub_lite.tmpl' +examples: + - name: 'pubsub_lite_topic_basic' + primary_resource_id: 'example' + primary_resource_name: 'fmt.Sprintf("tf-test-example-topic%s", context["random_suffix"])' + vars: + reservation_name: 'example-reservation' + topic_name: 'example-topic' +parameters: + - name: 'region' + type: String + description: The region of the pubsub lite topic. + url_param_only: true + - name: 'zone' + type: String + description: The zone of the pubsub lite topic. + url_param_only: true + - name: 'name' + type: String + description: 'Name of the topic.' + url_param_only: true + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resource_from_self_link.go.tmpl' +properties: + - name: 'partitionConfig' + type: NestedObject + description: | + The settings for this topic's partitions. + properties: + - name: 'count' + type: Integer + description: | + The number of partitions in the topic. Must be at least 1. + required: true + - name: 'capacity' + type: NestedObject + description: | + The capacity configuration. + properties: + - name: 'publishMibPerSec' + type: Integer + description: | + Publish throughput capacity per partition in MiB/s. Must be >= 4 and <= 16. 
+ required: true + - name: 'subscribeMibPerSec' + type: Integer + description: | + Subscribe throughput capacity per partition in MiB/s. Must be >= 4 and <= 32. + required: true + - name: 'retentionConfig' + type: NestedObject + description: | + The settings for a topic's message retention. + properties: + - name: 'perPartitionBytes' + type: String + description: | + The provisioned storage, in bytes, per partition. If the number of bytes stored + in any of the topic's partitions grows beyond this value, older messages will be + dropped to make room for newer ones, regardless of the value of period. + required: true + - name: 'period' + type: String + description: | + How long a published message is retained. If unset, messages will be retained as + long as the bytes retained for each partition is below perPartitionBytes. A + duration in seconds with up to nine fractional digits, terminated by 's'. + Example: "3.5s". + - name: 'reservationConfig' + type: NestedObject + description: | + The settings for this topic's Reservation usage. + properties: + - name: 'throughputReservation' + type: ResourceRef + description: | + The Reservation to use for this topic's throughput capacity. + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/pubsublite_topic_reservation_config_throughput_reservation.go.tmpl' + resource: 'Reservation' + imports: 'name' diff --git a/mmv1/products/pubsublite/go_product.yaml b/mmv1/products/pubsublite/go_product.yaml new file mode 100644 index 000000000000..b0ad18a65041 --- /dev/null +++ b/mmv1/products/pubsublite/go_product.yaml @@ -0,0 +1,23 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PubsubLite' +display_name: 'Cloud Pub/Sub' +versions: + - name: 'ga' + base_url: 'https://{{region}}-pubsublite.googleapis.com/v1/admin/' + cai_base_url: 'https://pubsublite.googleapis.com/v1/admin/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/redis/go_Cluster.yaml b/mmv1/products/redis/go_Cluster.yaml new file mode 100644 index 000000000000..5e643219507a --- /dev/null +++ b/mmv1/products/redis/go_Cluster.yaml @@ -0,0 +1,285 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Cluster' +description: | + A Google Cloud Redis Cluster instance. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/memorystore/docs/cluster/' + api: 'https://cloud.google.com/memorystore/docs/cluster/reference/rest/v1/projects.locations.clusters' +docs: +base_url: 'projects/{{project}}/locations/{{region}}/clusters' +self_link: 'projects/{{project}}/locations/{{region}}/clusters/{{name}}' +create_url: 'projects/{{project}}/locations/{{region}}/clusters?clusterId={{name}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 60 + update_minutes: 120 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + result: + resource_inside_response: false +custom_code: +examples: + - name: 'redis_cluster_ha' + primary_resource_id: 'cluster-ha' + vars: + cluster_name: 'ha-cluster' + policy_name: 'mypolicy' + subnet_name: 'mysubnet' + network_name: 'mynetwork' + deletion_protection_enabled: 'true' + test_vars_overrides: + 'deletion_protection_enabled': 'false' + - name: 'redis_cluster_ha_single_zone' + primary_resource_id: 'cluster-ha-single-zone' + vars: + cluster_name: 'ha-cluster-single-zone' + policy_name: 'mypolicy' + subnet_name: 'mysubnet' + network_name: 'mynetwork' + deletion_protection_enabled: 'true' + test_vars_overrides: + 'deletion_protection_enabled': 'false' +parameters: + - name: 'name' + type: String + description: | + Unique name of the resource in this scope including project and location using the form: + projects/{projectId}/locations/{locationId}/clusters/{clusterId} + url_param_only: true + required: true + immutable: true + default_from_api: true + - name: 'region' + type: String + description: | + The name of the region of the Redis cluster. + url_param_only: true + required: false + immutable: true + ignore_read: true + default_from_api: true +properties: + - name: 'createTime' + type: Time + description: | + The timestamp associated with the cluster creation request. 
A timestamp in + RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional + digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'state' + type: Enum + description: | + The current state of this cluster. Can be CREATING, READY, UPDATING, DELETING and SUSPENDED + immutable: true + output: true + enum_values: + - 'CREATING' + - 'READY' + - 'UPDATING' + - 'DELETING' + - 'SUSPENDED' + - name: 'uid' + type: String + description: | + System assigned, unique identifier for the cluster. + output: true + - name: 'authorizationMode' + type: Enum + description: | + Optional. The authorization mode of the Redis cluster. If not provided, auth feature is disabled for the cluster. + required: false + immutable: true + default_value: "AUTH_MODE_DISABLED" + enum_values: + - 'AUTH_MODE_UNSPECIFIED' + - 'AUTH_MODE_IAM_AUTH' + - 'AUTH_MODE_DISABLED' + - name: 'transitEncryptionMode' + type: Enum + description: | + Optional. The in-transit encryption for the Redis cluster. + If not provided, encryption is disabled for the cluster. + required: false + immutable: true + default_value: "TRANSIT_ENCRYPTION_MODE_DISABLED" + enum_values: + - 'TRANSIT_ENCRYPTION_MODE_UNSPECIFIED' + - 'TRANSIT_ENCRYPTION_MODE_DISABLED' + - 'TRANSIT_ENCRYPTION_MODE_SERVER_AUTHENTICATION' + - name: 'nodeType' + type: Enum + description: | + The nodeType for the Redis cluster. + If not provided, REDIS_HIGHMEM_MEDIUM will be used as default + required: false + immutable: true + default_from_api: true + enum_values: + - 'REDIS_SHARED_CORE_NANO' + - 'REDIS_HIGHMEM_MEDIUM' + - 'REDIS_HIGHMEM_XLARGE' + - 'REDIS_STANDARD_SMALL' + - name: 'zoneDistributionConfig' + type: NestedObject + description: Immutable. Zone distribution config for Memorystore Redis cluster. + immutable: true + properties: + - name: 'mode' + type: Enum + description: | + Immutable. The mode for zone distribution for Memorystore Redis cluster. 
+ If not provided, MULTI_ZONE will be used as default + default_from_api: true + enum_values: + - 'MULTI_ZONE' + - 'SINGLE_ZONE' + - name: 'zone' + type: String + description: | + Immutable. The zone for single zone Memorystore Redis cluster. + - name: 'pscConfigs' + type: Array + description: | + Required. Each PscConfig configures the consumer network where two + network addresses will be designated to the cluster for client access. + Currently, only one PscConfig is supported. + required: true + ignore_read: true + item_type: + type: NestedObject + properties: + - name: 'network' + type: String + description: | + Required. The consumer network where the network address of + the discovery endpoint will be reserved, in the form of + projects/{network_project_id_or_number}/global/networks/{network_id}. + required: true + - name: 'discoveryEndpoints' + type: Array + description: | + Output only. Endpoints created on each given network, + for Redis clients to connect to the cluster. + Currently only one endpoint is supported. + output: true + item_type: + type: NestedObject + properties: + - name: 'address' + type: String + description: | + Output only. Network address of the exposed Redis endpoint used by clients to connect to the service. + - name: 'port' + type: Integer + description: | + Output only. The port number of the exposed Redis endpoint. + - name: 'pscConfig' + type: NestedObject + description: | + Output only. Customer configuration for where the endpoint + is created and accessed from. + properties: + - name: 'network' + type: String + description: | + The consumer network where the network address of the discovery + endpoint will be reserved, in the form of + projects/{network_project_id}/global/networks/{network_id}. + - name: 'pscConnections' + type: Array + description: | + Output only. PSC connections for discovery of the cluster topology and accessing the cluster. 
+ output: true + item_type: + type: NestedObject + properties: + - name: 'pscConnectionId' + type: String + description: | + Output only. The PSC connection id of the forwarding rule connected to the service attachment. + - name: 'address' + type: String + description: | + Output only. The IP allocated on the consumer network for the PSC forwarding rule. + - name: 'forwardingRule' + type: String + description: | + Output only. The URI of the consumer side forwarding rule. Example: projects/{projectNumOrId}/regions/us-east1/forwardingRules/{resourceId}. + - name: 'projectId' + type: String + description: | + Output only. The consumer projectId where the forwarding rule is created from. + - name: 'network' + type: String + description: | + The consumer network where the IP address resides, in the form of projects/{projectId}/global/networks/{network_id}. + - name: 'stateInfo' + type: NestedObject + description: Output only. Additional information about the current state of the cluster. + output: true + properties: + - name: 'updateInfo' + type: NestedObject + properties: + - name: 'targetShardCount' + type: Integer + description: Target number of shards for redis cluster. + - name: 'targetReplicaCount' + type: Integer + description: Target number of replica nodes per shard. + - name: 'replicaCount' + type: Integer + description: | + Optional. The number of replica nodes per shard. + required: false + send_empty_value: true + - name: 'sizeGb' + type: Integer + description: | + Output only. Redis memory size in GB for the entire cluster. + output: true + - name: 'preciseSizeGb' + type: Double + description: | + Output only. Redis memory precise size in GB for the entire cluster. + output: true + - name: 'shardCount' + type: Integer + description: | + Required. Number of shards for the Redis cluster. + required: true + - name: 'deletionProtectionEnabled' + type: Boolean + description: | + Optional. Indicates if the cluster is deletion protected or not. 
+ If the value is set to true, any delete cluster operation will fail. + Default value is true. + required: false + default_value: true + - name: 'redisConfigs' + type: KeyValuePairs + description: | + Configure Redis Cluster behavior using a subset of native Redis configuration parameters. + Please check Memorystore documentation for the list of supported parameters: + https://cloud.google.com/memorystore/docs/cluster/supported-instance-configurations diff --git a/mmv1/products/redis/go_Instance.yaml b/mmv1/products/redis/go_Instance.yaml new file mode 100644 index 000000000000..5eebc8ced133 --- /dev/null +++ b/mmv1/products/redis/go_Instance.yaml @@ -0,0 +1,552 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Instance' +description: | + A Google Cloud Redis instance. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/memorystore/docs/redis/' + api: 'https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances' +docs: +base_url: 'projects/{{project}}/locations/{{region}}/instances' +create_url: 'projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + result: + resource_inside_response: false +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/redis_instance.tmpl' + constants: 'templates/terraform/constants/go/redis_instance.go.tmpl' + encoder: 'templates/terraform/encoders/go/redis_location_id_for_fallback_zone.go.tmpl' + decoder: 'templates/terraform/decoders/go/redis_instance.go.tmpl' +custom_diff: + - 'customdiff.ForceNewIfChange("redis_version", isRedisVersionDecreasing)' + - 'tpgresource.DefaultProviderProject' +skip_default_cdiff: true +examples: + - name: 'redis_instance_basic' + primary_resource_id: 'cache' + vars: + instance_name: 'memory-cache' + prevent_destroy: 'true' + test_vars_overrides: + 'prevent_destroy': 'false' + - name: 'redis_instance_full' + primary_resource_id: 'cache' + vars: + instance_name: 'ha-memory-cache' + network_name: 'redis-test-network' + prevent_destroy: 'true' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedTestNetwork(t, "redis-full")' + 'prevent_destroy': 'false' + - name: 'redis_instance_full_with_persistence_config' + primary_resource_id: 'cache-persis' + vars: + instance_name: 'ha-memory-cache-persis' + network_name: 'redis-test-network' + prevent_destroy: 'true' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedTestNetwork(t, "redis-full-persis")' + 'prevent_destroy': 'false' + - name: 
'redis_instance_private_service' + primary_resource_id: 'cache' + vars: + instance_name: 'private-cache' + address_name: 'address' + network_name: 'redis-test-network' + prevent_destroy: 'true' + test_vars_overrides: + 'prevent_destroy': 'false' + skip_test: true + - name: 'redis_instance_private_service_test' + primary_resource_id: 'cache' + vars: + instance_name: 'private-cache' + network_name: 'redis-test-network' + prevent_destroy: 'true' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1")' + 'prevent_destroy': 'false' + skip_docs: true + - name: 'redis_instance_mrr' + primary_resource_id: 'cache' + vars: + instance_name: 'mrr-memory-cache' + network_name: 'redis-test-network' + prevent_destroy: 'true' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedTestNetwork(t, "redis-mrr")' + 'prevent_destroy': 'false' + - name: 'redis_instance_cmek' + primary_resource_id: 'cache' + vars: + instance_name: 'cmek-memory-cache' + network_name: 'redis-test-network' + prevent_destroy: 'true' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedTestNetwork(t, "redis-cmek")' + 'prevent_destroy': 'false' + skip_test: true +parameters: + - name: 'region' + type: String + description: | + The name of the Redis region of the instance. + url_param_only: true + required: false + immutable: true + ignore_read: true + default_from_api: true +properties: + - name: 'alternativeLocationId' + type: String + description: | + Only applicable to STANDARD_HA tier which protects the instance + against zonal failures by provisioning it across two zones. + If provided, it must be a different zone from the one provided in + [locationId]. + immutable: true + default_from_api: true + - name: 'authEnabled' + type: Boolean + description: | + Optional. Indicates whether OSS Redis AUTH is enabled for the + instance. If set to "true" AUTH is enabled on the instance. + Default value is "false" meaning AUTH is disabled. 
+ default_value: false + - name: 'authorizedNetwork' + type: String + description: | + The full name of the Google Compute Engine network to which the + instance is connected. If left unspecified, the default network + will be used. + immutable: true + default_from_api: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/redis_instance_authorized_network.tmpl' + - name: 'connectMode' + type: Enum + description: | + The connection mode of the Redis instance. + immutable: true + default_value: "DIRECT_PEERING" + enum_values: + - 'DIRECT_PEERING' + - 'PRIVATE_SERVICE_ACCESS' + - name: 'createTime' + type: Time + description: | + The time the instance was created in RFC3339 UTC "Zulu" format, + accurate to nanoseconds. + output: true + - name: 'currentLocationId' + type: String + description: | + The current zone where the Redis endpoint is placed. + For Basic Tier instances, this will always be the same as the + [locationId] provided by the user at creation time. For Standard Tier + instances, this can be either [locationId] or [alternativeLocationId] + and can change after a failover event. + output: true + - name: 'displayName' + type: String + description: | + An arbitrary and optional user-provided name for the instance. + - name: 'host' + type: String + description: | + Hostname or IP address of the exposed Redis endpoint used by clients + to connect to the service. + output: true + - name: 'labels' + type: KeyValueLabels + description: Resource labels to represent user provided metadata. + - name: 'redisConfigs' + type: KeyValuePairs + description: | + Redis configuration parameters, according to http://redis.io/topics/config. 
+ Please check Memorystore documentation for the list of supported parameters: + https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs + - name: 'locationId' + type: String + description: | + The zone where the instance will be provisioned. If not provided, + the service will choose a zone for the instance. For STANDARD_HA tier, + instances will be created across two zones for protection against + zonal failures. If [alternativeLocationId] is also provided, it must + be different from [locationId]. + immutable: true + default_from_api: true + - name: 'name' + type: String + description: | + The ID of the instance or a fully qualified identifier for the instance. + required: true + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/shortname_to_url.go.tmpl' + validation: + regex: '^[a-z][a-z0-9-]{0,39}[a-z0-9]$' + - name: 'persistenceConfig' + type: NestedObject + description: Persistence configuration for an instance. + default_from_api: true + properties: + - name: 'persistenceMode' + type: Enum + description: | + Optional. Controls whether Persistence features are enabled. If not provided, the existing value will be used. + + - DISABLED: Persistence is disabled for the instance, and any existing snapshots are deleted. + - RDB: RDB based Persistence is enabled. + required: true + default_from_api: true + enum_values: + - 'DISABLED' + - 'RDB' + - name: 'rdbSnapshotPeriod' + type: Enum + description: | + Optional. Available snapshot periods for scheduling. + + - ONE_HOUR: Snapshot every 1 hour. + - SIX_HOURS: Snapshot every 6 hours. + - TWELVE_HOURS: Snapshot every 12 hours. + - TWENTY_FOUR_HOURS: Snapshot every 24 hours. 
+ required: false + enum_values: + - 'ONE_HOUR' + - 'SIX_HOURS' + - 'TWELVE_HOURS' + - 'TWENTY_FOUR_HOURS' + - name: 'rdbNextSnapshotTime' + type: String + description: | + Output only. The next time that a snapshot attempt is scheduled to occur. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up + to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'rdbSnapshotStartTime' + type: String + description: | + Optional. Date and time that the first snapshot was/will be attempted, + and to which future snapshots will be aligned. If not provided, + the current time will be used. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution + and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + default_from_api: true + - name: 'maintenancePolicy' + type: NestedObject + description: Maintenance policy for an instance. + properties: + - name: 'createTime' + type: String + description: | + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'description' + type: String + description: | + Optional. Description of what this policy is for. + Create/Update methods return INVALID_ARGUMENT if the + length is greater than 512. + - name: 'weeklyMaintenanceWindow' + type: Array + description: | + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. 
+ item_type: + type: NestedObject + properties: + - name: 'day' + type: Enum + description: | + Required. The day of week that maintenance updates occur. + + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + required: true + enum_values: + - 'DAY_OF_WEEK_UNSPECIFIED' + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + - name: 'duration' + type: String + description: | + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + output: true + - name: 'startTime' + type: NestedObject + description: | + Required. Start time of the window in UTC time. + required: true + send_empty_value: true + allow_empty_object: true + properties: + - name: 'hours' + type: Integer + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + validation: + function: 'validation.IntBetween(0,23)' + - name: 'minutes' + type: Integer + description: | + Minutes of hour of day. Must be from 0 to 59. + validation: + function: 'validation.IntBetween(0,59)' + - name: 'seconds' + type: Integer + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + validation: + function: 'validation.IntBetween(0,60)' + - name: 'nanos' + type: Integer + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + validation: + function: 'validation.IntBetween(0,999999999)' + - name: 'maintenanceSchedule' + type: NestedObject + description: Upcoming maintenance schedule. 
+ output: true + properties: + - name: 'startTime' + type: String + description: | + Output only. The start time of any upcoming scheduled maintenance for this instance. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'endTime' + type: String + description: | + Output only. The end time of any upcoming scheduled maintenance for this instance. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'scheduleDeadlineTime' + type: String + description: | + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'maintenanceVersion' + type: String + description: The self service update maintenance version. + required: false + default_from_api: true + - name: 'memorySizeGb' + type: Integer + description: Redis memory size in GiB. + required: true + - name: 'port' + type: Integer + description: The port number of the exposed Redis endpoint. + output: true + - name: 'persistenceIamIdentity' + type: String + description: | + Output only. Cloud IAM identity used by import / export operations + to transfer data to/from Cloud Storage. Format is "serviceAccount:". + The value may change over time for a given instance so should be + checked before each import/export operation. + output: true + - name: 'redisVersion' + type: String + description: | + The version of Redis software. If not provided, latest supported + version will be used. Please check the API documentation linked + at the top for the latest valid values. 
+ default_from_api: true + update_url: 'projects/{{project}}/locations/{{region}}/instances/{{name}}:upgrade' + update_verb: 'POST' + - name: 'reservedIpRange' + type: String + description: | + The CIDR range of internal addresses that are reserved for this + instance. If not provided, the service will choose an unused /29 + block, for example, 10.0.0.0/29 or 192.168.0.0/29. Ranges must be + unique and non-overlapping with existing subnets in an authorized + network. + immutable: true + ignore_read: true + default_from_api: true + - name: 'tier' + type: Enum + description: | + The service tier of the instance. Must be one of these values: + + - BASIC: standalone instance + - STANDARD_HA: highly available primary/replica instances + immutable: true + default_value: "BASIC" + enum_values: + - 'BASIC' + - 'STANDARD_HA' + - name: 'transitEncryptionMode' + type: Enum + description: | + The TLS mode of the Redis instance, If not provided, TLS is disabled for the instance. + + - SERVER_AUTHENTICATION: Client to Server traffic encryption enabled with server authentication + immutable: true + default_value: "DISABLED" + enum_values: + - 'SERVER_AUTHENTICATION' + - 'DISABLED' + - name: 'serverCaCerts' + type: Array + description: | + List of server CA certificates for the instance. + output: true + item_type: + type: NestedObject + properties: + - name: 'serialNumber' + type: String + description: | + Serial number, as extracted from the certificate. + output: true + - name: 'cert' + type: String + description: | + The certificate data in PEM format. + output: true + - name: 'createTime' + type: String + description: | + The time when the certificate was created. + output: true + - name: 'expireTime' + type: String + description: | + The time when the certificate expires. + output: true + - name: 'sha1Fingerprint' + type: String + description: | + Sha1 Fingerprint of the certificate. + output: true + - name: 'replicaCount' + type: Integer + description: | + Optional. 
The number of replica nodes. The valid range for the Standard Tier with + read replicas enabled is [1-5] and defaults to 2. If read replicas are not enabled + for a Standard Tier instance, the only valid value is 1 and the default is 1. + The valid value for basic tier is 0 and the default is also 0. + default_from_api: true + - name: 'nodes' + type: Array + description: | + Output only. Info per node. + output: true + item_type: + type: NestedObject + properties: + - name: 'id' + type: String + description: | + Node identifying string. e.g. 'node-0', 'node-1' + output: true + - name: 'zone' + type: String + description: | + Location of the node. + output: true + - name: 'readEndpoint' + type: String + description: | + Output only. Hostname or IP address of the exposed readonly Redis endpoint. Standard tier only. + Targets all healthy replica nodes in instance. Replication is asynchronous and replica nodes + will exhibit some lag behind the primary. Write requests must target 'host'. + output: true + - name: 'readEndpointPort' + type: Integer + description: | + Output only. The port number of the exposed readonly redis endpoint. Standard tier only. + Write requests should target 'port'. + output: true + - name: 'readReplicasMode' + type: Enum + description: | + Optional. Read replica mode. Can only be specified when trying to create the instance. + If not set, Memorystore Redis backend will default to READ_REPLICAS_DISABLED. + - READ_REPLICAS_DISABLED: If disabled, read endpoint will not be provided and the + instance cannot scale up or down the number of replicas. + - READ_REPLICAS_ENABLED: If enabled, read endpoint will be provided and the instance + can scale up and down the number of replicas. + default_from_api: true + enum_values: + - 'READ_REPLICAS_DISABLED' + - 'READ_REPLICAS_ENABLED' + - name: 'secondaryIpRange' + type: String + description: | + Optional. Additional IP range for node placement. 
Required when enabling read replicas on + an existing instance. For DIRECT_PEERING mode value must be a CIDR range of size /28, or + "auto". For PRIVATE_SERVICE_ACCESS mode value must be the name of an allocated address + range associated with the private service access connection, or "auto". + default_from_api: true + diff_suppress_func: 'secondaryIpDiffSuppress' + - name: 'customerManagedKey' + type: String + description: | + Optional. The KMS key reference that you want to use to encrypt the data at rest for this Redis + instance. If this is provided, CMEK is enabled. + immutable: true diff --git a/mmv1/products/redis/go_product.yaml b/mmv1/products/redis/go_product.yaml new file mode 100644 index 000000000000..429404a4f6d9 --- /dev/null +++ b/mmv1/products/redis/go_product.yaml @@ -0,0 +1,30 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Redis' +display_name: 'Memorystore (Redis)' +versions: + - name: 'ga' + base_url: 'https://redis.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://redis.googleapis.com/v1beta1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' +async: + type: "OpAsync" + operation: + base_url: '{{op_id}}' + result: + resource_inside_response: false diff --git a/mmv1/products/resourcemanager/go_Lien.yaml b/mmv1/products/resourcemanager/go_Lien.yaml new file mode 100644 index 000000000000..c970cecd2cef --- /dev/null +++ b/mmv1/products/resourcemanager/go_Lien.yaml @@ -0,0 +1,95 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Lien' +description: + A Lien represents an encumbrance on the actions that can be performed on a + resource. 
+docs: +id_format: '{{name}}' +base_url: 'liens' +self_link: 'liens?parent={{parent}}' +immutable: true +import_format: + - '{{parent}}/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - name +nested_query: + keys: + - liens + is_list_of_ids: false + modify_by_patch: false +custom_code: + decoder: 'templates/terraform/decoders/go/avoid_meaningless_project_update.tmpl' + post_create: 'templates/terraform/post_create/go/lien.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/modify_delete_url.tmpl' + post_import: 'templates/terraform/post_import/go/lien_import.tmpl' +examples: + - name: 'resource_manager_lien' + primary_resource_id: 'lien' + vars: + project_id: 'staging-project' + skip_test: true +parameters: + - name: 'parent' + type: String + description: | + A reference to the resource this Lien is attached to. + The server will validate the parent against those for which Liens are supported. + Since a variety of objects can have Liens against them, you must provide the type + prefix (e.g. "projects/my-project-name"). + required: true + immutable: true + - name: 'restrictions' + type: Array + description: | + The types of operations which should be blocked as a result of this Lien. + Each value should correspond to an IAM permission. The server will validate + the permissions against those for which Liens are supported. An empty + list is meaningless and will be rejected. + e.g. ['resourcemanager.projects.delete'] + required: true + immutable: true + item_type: + type: String +properties: + - name: 'name' + type: String + description: A system-generated unique identifier for this Lien. + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'reason' + type: String + description: | + Concise user-visible strings indicating why an action cannot be performed + on a resource. Maximum length of 200 characters. 
+ required: true + immutable: true + - name: 'origin' + type: String + description: | + A stable, user-visible/meaningful string identifying the origin + of the Lien, intended to be inspected programmatically. Maximum length of + 200 characters. + required: true + immutable: true + - name: 'createTime' + type: Time + description: 'Time of creation' + output: true diff --git a/mmv1/products/resourcemanager/go_product.yaml b/mmv1/products/resourcemanager/go_product.yaml new file mode 100644 index 000000000000..b96ab4ecd938 --- /dev/null +++ b/mmv1/products/resourcemanager/go_product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ResourceManager' +display_name: 'Resource Manager' +versions: + - name: 'ga' + base_url: 'https://cloudresourcemanager.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/runtimeconfig/go_Config.yaml b/mmv1/products/runtimeconfig/go_Config.yaml new file mode 100644 index 000000000000..a9ef94bb99d9 --- /dev/null +++ b/mmv1/products/runtimeconfig/go_Config.yaml @@ -0,0 +1,57 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Config' +description: | + A RuntimeConfig resource is the primary resource in the Cloud RuntimeConfig service. + A RuntimeConfig resource consists of metadata and a hierarchy of variables. +min_version: 'beta' +exclude_resource: true +docs: +base_url: 'projects/{{project}}/configs' +self_link: 'projects/{{project}}/configs/{{name}}' +import_format: + - 'projects/{{project}}/configs/{{config}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'config' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' +custom_code: +exclude_tgc: true +examples: + - name: 'runtimeconfig_config_basic' + primary_resource_id: 'config' + primary_resource_name: 'fmt.Sprintf("tf-test-my-config%s", context["random_suffix"])' + min_version: 'beta' + vars: + config_name: 'my-config' +parameters: + - name: 'name' + type: String + description: | + The name of the runtime config. + min_version: 'beta' + required: true + immutable: true +properties: + - name: 'description' + type: String + description: | + The description to associate with the runtime config. + min_version: 'beta' diff --git a/mmv1/products/runtimeconfig/go_product.yaml b/mmv1/products/runtimeconfig/go_product.yaml new file mode 100644 index 000000000000..adc267f34755 --- /dev/null +++ b/mmv1/products/runtimeconfig/go_product.yaml @@ -0,0 +1,23 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RuntimeConfig' +legacy_name: 'runtimeconfig' +display_name: 'Runtime Configurator' +versions: + - name: 'beta' + base_url: 'https://runtimeconfig.googleapis.com/v1beta1/' +scopes: + - 'https://www.googleapis.com/auth/cloudruntimeconfig' diff --git a/mmv1/products/secretmanager/go_Secret.yaml b/mmv1/products/secretmanager/go_Secret.yaml new file mode 100644 index 000000000000..aacd0c5b9f4f --- /dev/null +++ b/mmv1/products/secretmanager/go_Secret.yaml @@ -0,0 +1,250 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Secret' +description: | + A Secret is a logical secret whose value and versions can be accessed. 
+references: + guides: + api: 'https://cloud.google.com/secret-manager/docs/reference/rest/v1/projects.secrets' +docs: +base_url: 'projects/{{project}}/secrets' +self_link: 'projects/{{project}}/secrets/{{secret_id}}' +create_url: 'projects/{{project}}/secrets?secretId={{secret_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/secrets/{{secret_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + allowed_iam_role: 'roles/secretmanager.secretAccessor' + parent_resource_attribute: 'secret_id' + iam_conditions_request_type: 'QUERY_PARAM_NESTED' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' +custom_code: + constants: 'templates/terraform/constants/go/secret_manager_secret.go.tmpl' + pre_update: 'templates/terraform/pre_update/go/secret_manager_secret.go.tmpl' +custom_diff: + - 'secretManagerSecretAutoCustomizeDiff' +examples: + - name: 'secret_config_basic' + primary_resource_id: 'secret-basic' + primary_resource_name: 'fmt.Sprintf("secret%s", context["random_suffix"])' + vars: + secret_id: 'secret' + - name: 'secret_with_annotations' + primary_resource_id: 'secret-with-annotations' + vars: + secret_id: 'secret' + - name: 'secret_with_version_destroy_ttl' + primary_resource_id: 'secret-with-version-destroy-ttl' + vars: + secret_id: 'secret' + - name: 'secret_with_automatic_cmek' + primary_resource_id: 'secret-with-automatic-cmek' + vars: + secret_id: 'secret' + kms_key_name: 'kms-key' + test_vars_overrides: + 'kms_key_name': 'acctest.BootstrapKMSKey(t).CryptoKey.Name' +parameters: + - name: 'secretId' + type: String + description: | + This must be unique within the project. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of the Secret. 
Format: + `projects/{{project}}/secrets/{{secret_id}}` + output: true + - name: 'createTime' + type: String + description: | + The time at which the Secret was created. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + The labels assigned to this Secret. + + Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, + and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, + and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 64 labels can be assigned to a given resource. + + An object containing a list of "key": value pairs. Example: + { "name": "wrench", "mass": "1.3kg", "count": "3" }. + - name: 'annotations' + type: KeyValueAnnotations + description: | + Custom metadata about the secret. + + Annotations are distinct from various forms of labels. Annotations exist to allow + client tools to store their own state information without requiring a database. + + Annotation keys must be between 1 and 63 characters long, have a UTF-8 encoding of + maximum 128 bytes, begin and end with an alphanumeric character ([a-z0-9A-Z]), and + may have dashes (-), underscores (_), dots (.), and alphanumerics in between these + symbols. + + The total size of annotation keys and values must be less than 16KiB. + + An object containing a list of "key": value pairs. Example: + { "name": "wrench", "mass": "1.3kg", "count": "3" }. + - name: 'versionAliases' + type: KeyValuePairs + description: | + Mapping from version alias to version name. + + A version alias is a string with a maximum length of 63 characters and can contain + uppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_') + characters. An alias string must start with a letter and cannot be the string + 'latest' or 'NEW'. 
No more than 50 aliases can be assigned to a given secret. + + An object containing a list of "key": value pairs. Example: + { "name": "wrench", "mass": "1.3kg", "count": "3" }. + - name: 'versionDestroyTtl' + type: String + description: | + Secret Version TTL after destruction request. + This is a part of the delayed delete feature on Secret Version. + For secret with versionDestroyTtl>0, version destruction doesn't happen immediately + on calling destroy instead the version goes to a disabled state and + the actual destruction happens after this TTL expires. + - name: 'replication' + type: NestedObject + description: | + The replication policy of the secret data attached to the Secret. It cannot be changed + after the Secret has been created. + required: true + immutable: true + properties: + - name: 'auto' + type: NestedObject + description: | + The Secret will automatically be replicated without any restrictions. + api_name: automatic + immutable: true + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'replication.0.user_managed' + - 'replication.0.auto' + properties: + - name: 'customerManagedEncryption' + type: NestedObject + description: | + The customer-managed encryption configuration of the Secret. + If no configuration is provided, Google-managed default + encryption is used. + properties: + - name: 'kmsKeyName' + type: String + description: | + The resource name of the Cloud KMS CryptoKey used to encrypt secret payloads. + required: true + - name: 'userManaged' + type: NestedObject + description: | + The Secret will be replicated to the regions specified by the user. + immutable: true + exactly_one_of: + - 'replication.0.user_managed' + - 'replication.0.auto' + properties: + - name: 'replicas' + type: Array + description: | + The list of Replicas for this Secret. Cannot be empty. 
+ required: true + immutable: true + item_type: + type: NestedObject + properties: + - name: 'location' + type: String + description: | + The canonical IDs of the location to replicate data. For example: "us-east1". + required: true + immutable: true + - name: 'customerManagedEncryption' + type: NestedObject + description: | + Customer Managed Encryption for the secret. + properties: + - name: 'kmsKeyName' + type: String + description: | + Describes the Cloud KMS encryption key that will be used to protect destination secret. + required: true + min_size: 1 + - name: 'topics' + type: Array + description: | + A list of up to 10 Pub/Sub topics to which messages are published when control plane operations are called on the secret or its versions. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*. + For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic. + required: true + - name: 'expireTime' + type: String + description: | + Timestamp in UTC when the Secret is scheduled to expire. This is always provided on output, regardless of what was sent on input. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + Only one of `expire_time` or `ttl` can be provided. + default_from_api: true + - name: 'ttl' + type: String + description: | + The TTL for the Secret. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". + Only one of `ttl` or `expire_time` can be provided. + ignore_read: true + - name: 'rotation' + type: NestedObject + description: | + The rotation time and period for a Secret. 
At `next_rotation_time`, Secret Manager will send a Pub/Sub notification to the topics configured on the Secret. `topics` must be set to configure rotation. + required_with: + - 'topics' + properties: + - name: 'nextRotationTime' + type: String + description: | + Timestamp in UTC at which the Secret is scheduled to rotate. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + required_with: + - 'rotation.0.rotation_period' + - name: 'rotationPeriod' + type: String + description: | + The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). + If rotationPeriod is set, `next_rotation_time` must be set. `next_rotation_time` will be advanced by this period when the service automatically sends rotation notifications. diff --git a/mmv1/products/secretmanager/go_SecretVersion.yaml b/mmv1/products/secretmanager/go_SecretVersion.yaml new file mode 100644 index 000000000000..adedf121dc63 --- /dev/null +++ b/mmv1/products/secretmanager/go_SecretVersion.yaml @@ -0,0 +1,139 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SecretVersion' +description: | + A secret version resource. +# Sweeper skipped as this resource has customized deletion. 
+docs: + optional_properties: | + * `is_secret_data_base64` - (Optional) If set to 'true', the secret data is expected to be base64-encoded string and would be sent as is. +base_url: '{{name}}' +self_link: '{{name}}' +create_url: '{{secret}}:addVersion' +delete_url: '{{name}}:destroy' +delete_verb: 'POST' +import_format: + - 'projects/{{%project}}/secrets/{{%secret_id}}/versions/{{%version}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/secret_version_is_secret_data_base64.go.tmpl' + decoder: 'templates/terraform/decoders/go/treat_destroyed_state_as_gone.tmpl' + post_create: 'templates/terraform/post_create/go/secret_version.go.tmpl' + pre_read: 'templates/terraform/pre_read/go/secret_version_is_secret_data_base64.go.tmpl' + custom_update: 'templates/terraform/custom_update/go/secret_version.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/secret_version_deletion_policy.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/secret_version.go.tmpl' +skip_sweeper: true +examples: + - name: 'secret_version_basic' + primary_resource_id: 'secret-version-basic' + vars: + secret_id: 'secret-version' + data: 'secret-data' + - name: 'secret_version_deletion_policy_abandon' + primary_resource_id: 'secret-version-deletion-policy' + vars: + secret_id: 'secret-version' + data: 'secret-data' + ignore_read_extra: + - 'deletion_policy' + - name: 'secret_version_deletion_policy_disable' + primary_resource_id: 'secret-version-deletion-policy' + vars: + secret_id: 'secret-version' + data: 'secret-data' + ignore_read_extra: + - 'deletion_policy' + - name: 'secret_version_with_base64_string_secret_data' + primary_resource_id: 'secret-version-base64' + vars: + secret_id: 'secret-version' + data: 'secret-data.pfx' + test_vars_overrides: + 'data': '"./test-fixtures/binary-file.pfx"' + ignore_read_extra: + - 'is_secret_data_base64' +virtual_fields: + - name: 
'deletion_policy' + description: | + The deletion policy for the secret version. Setting `ABANDON` allows the resource + to be abandoned rather than deleted. Setting `DISABLE` allows the resource to be + disabled rather than deleted. Default is `DELETE`. Possible values are: + * DELETE + * DISABLE + * ABANDON + type: String + default_value: "DELETE" +parameters: + - name: 'secret' + type: ResourceRef + description: | + Secret Manager secret resource + url_param_only: true + required: true + immutable: true + resource: 'Secret' + imports: 'name' +properties: + - name: 'enabled' + type: Boolean + description: | + The current state of the SecretVersion. + api_name: state + immutable: false + custom_flatten: 'templates/terraform/custom_flatten/go/secret_version_enable.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/secret_version_enable.go.tmpl' + default_value: true + - name: 'name' + type: String + description: | + The resource name of the SecretVersion. Format: + `projects/{{project}}/secrets/{{secret_id}}/versions/{{version}}` + output: true + - name: 'version' + type: String + description: | + The version of the Secret. + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/secret_version_version.go.tmpl' + - name: 'createTime' + type: String + description: | + The time at which the Secret was created. + output: true + - name: 'destroyTime' + type: String + description: | + The time at which the Secret was destroyed. Only present if state is DESTROYED. + output: true + - name: 'payload' + type: NestedObject + description: The secret payload of the SecretVersion. + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/secret_version_access.go.tmpl' + flatten_object: true + properties: + - name: 'secret_data' + type: String + description: The secret data. Must be no larger than 64KiB. 
+ api_name: data + required: true + immutable: true + sensitive: true + custom_expand: 'templates/terraform/custom_expand/go/secret_version_secret_data.go.tmpl' diff --git a/mmv1/products/secretmanager/go_product.yaml b/mmv1/products/secretmanager/go_product.yaml new file mode 100644 index 000000000000..85e5d41ec6a9 --- /dev/null +++ b/mmv1/products/secretmanager/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SecretManager' +display_name: 'Secret Manager' +versions: + - name: 'ga' + base_url: 'https://secretmanager.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://secretmanager.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/securesourcemanager/go_Instance.yaml b/mmv1/products/securesourcemanager/go_Instance.yaml new file mode 100644 index 000000000000..e51b0e23130f --- /dev/null +++ b/mmv1/products/securesourcemanager/go_Instance.yaml @@ -0,0 +1,222 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Instance' +kind: 'securesourcemanager#instance' +description: 'Instances are deployed to an available Google Cloud region and are accessible via their web interface.' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/secure-source-manager/docs/create-instance' + api: 'https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.instances' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/instances?instance_id={{instance_id}}' +self_link: 'projects/{{project}}/locations/{{location}}/instances/{{instance_id}}' +immutable: true +import_format: + - 'projects/{{project}}/locations/{{location}}/instances/{{instance_id}}' + - '{{instance_id}}' +timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 +autogen_async: true +async: + actions: ['create', 'delete'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + allowed_iam_role: 'roles/securesourcemanager.instanceManager' + admin_iam_role: 'roles/securesourcemanager.instanceOwner' + parent_resource_attribute: 'instance_id' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/instances/{{instance_id}}' + 
- '{{instance_id}}' +custom_code: +examples: + - name: 'secure_source_manager_instance_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-instance%s", context["random_suffix"])' + vars: + instance_id: 'my-instance' + - name: 'secure_source_manager_instance_cmek' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-instance%s", context["random_suffix"])' + vars: + instance_id: 'my-instance' + keyring_name: 'my-keyring' + key_name: 'my-key' + - name: 'secure_source_manager_instance_private' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-instance%s", context["random_suffix"])' + vars: + instance_id: 'my-instance' + ca_pool_id: 'ca-pool' + root_ca_id: 'root-ca' + external_providers: ["time"] + - name: 'secure_source_manager_instance_private_psc_backend' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-instance%s", context["random_suffix"])' + vars: + network_id: 'my-network' + subnet_id: 'my-subnet' + neg_id: 'my-neg' + backend_id: 'my-backend-service' + proxy_subnet_id: 'my-proxy-subnet' + target_proxy_id: 'my-target-proxy' + fw_rule_to_target_proxy_id: 'fw-rule-target-proxy' + dns_zone_id: 'my-dns-zone' + instance_id: 'my-instance' + ca_pool_id: 'ca-pool' + root_ca_id: 'root-ca' + external_providers: ["time"] + - name: 'secure_source_manager_instance_private_psc_endpoint' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-instance%s", context["random_suffix"])' + vars: + network_id: 'my-network' + subnet_id: 'my-subnet' + address_id: 'my-address' + fw_rule_to_service_attachment_id: 'fw-rule-service-attachment' + dns_zone_id: 'my-dns-zone' + instance_id: 'my-instance' + ca_pool_id: 'ca-pool' + root_ca_id: 'root-ca' + external_providers: ["time"] +parameters: + - name: 'location' + type: String + description: | + The location for the Instance. 
+ url_param_only: true + required: true + immutable: true + - name: 'instance_id' + type: String + description: | + The name for the Instance. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name for the Instance. + immutable: true + output: true + - name: 'createTime' + type: Time + description: | + Time the Instance was created in UTC. + output: true + - name: 'updateTime' + type: Time + description: | + Time the Instance was updated in UTC. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Labels as key value pairs. + - name: 'state' + type: Enum + description: | + The current state of the Instance. + output: true + enum_values: + - 'CREATING' + - 'ACTIVE' + - 'DELETING' + - 'PAUSED' + - 'UNKNOWN' + - name: 'hostConfig' + type: NestedObject + description: | + A list of hostnames for this instance. + output: true + properties: + - name: 'html' + type: String + description: 'HTML hostname.' + output: true + - name: 'api' + type: String + description: 'API hostname.' + output: true + - name: 'gitHttp' + type: String + description: 'Git HTTP hostname.' + output: true + - name: 'gitSsh' + type: String + description: 'Git SSH hostname.' + output: true + - name: 'stateNote' + type: Enum + description: | + Provides information about the current instance state. + output: true + enum_values: + - 'STATE_NOTE_UNSPECIFIED' + - 'PAUSED_CMEK_UNAVAILABLE' + - 'INSTANCE_RESUMING' + - name: 'kmsKey' + type: String + description: | + Customer-managed encryption key name, in the format projects/*/locations/*/keyRings/*/cryptoKeys/*. + immutable: true + - name: 'privateConfig' + type: NestedObject + description: | + Private settings for private instance. + immutable: true + properties: + - name: 'isPrivate' + type: Boolean + description: | + 'Indicate if it's private instance.' 
+ required: true + immutable: true + - name: 'caPool' + type: String + description: | + CA pool resource, resource must in the format of `projects/{project}/locations/{location}/caPools/{ca_pool}`. + required: true + immutable: true + - name: 'httpServiceAttachment' + type: String + description: | + Service Attachment for HTTP, resource is in the format of `projects/{project}/regions/{region}/serviceAttachments/{service_attachment}`. + output: true + - name: 'sshServiceAttachment' + type: String + description: | + Service Attachment for SSH, resource is in the format of `projects/{project}/regions/{region}/serviceAttachments/{service_attachment}`. + output: true diff --git a/mmv1/products/securesourcemanager/go_Repository.yaml b/mmv1/products/securesourcemanager/go_Repository.yaml new file mode 100644 index 000000000000..3e07e7c5e300 --- /dev/null +++ b/mmv1/products/securesourcemanager/go_Repository.yaml @@ -0,0 +1,159 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Repository' +description: 'Repositories store source code. It supports all Git SCM client commands and has built-in pull requests and issue tracking. Both HTTPS and SSH authentication are supported.' 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/secure-source-manager/docs/overview' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/repositories?repository_id={{repository_id}}' +self_link: 'projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}' +immutable: true +import_format: + - 'projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}' + - '{{repository_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + allowed_iam_role: 'roles/securesourcemanager.repoAdmin' + parent_resource_attribute: 'repository_id' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}' + - '{{repository_id}}' +custom_code: +examples: + - name: 'secure_source_manager_repository_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])' + vars: + repository_id: 'my-repository' + instance_id: 'my-instance' + - name: 'secure_source_manager_repository_initial_config' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])' + vars: + repository_id: 'my-repository' + instance_id: 'my-instance' +parameters: + - name: 'location' + type: String + description: | + The location for the Repository. + url_param_only: true + required: true + - name: 'repository_id' + type: String + description: | + The ID for the Repository. 
+ url_param_only: true + required: true +properties: + - name: 'name' + type: String + description: | + The resource name for the Repository. + output: true + - name: 'description' + type: String + description: | + Description of the repository, which cannot exceed 500 characters. + - name: 'instance' + type: String + description: | + The name of the instance in which the repository is hosted. + required: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - name: 'uid' + type: String + description: | + Unique identifier of the repository. + output: true + - name: 'createTime' + type: Time + description: | + Time the repository was created in UTC. + output: true + - name: 'updateTime' + type: Time + description: | + Time the repository was updated in UTC. + output: true + - name: 'uris' + type: NestedObject + description: | + URIs for the repository. + output: true + properties: + - name: 'html' + type: String + description: | + HTML is the URI for the user to view the repository in a browser. + output: true + - name: 'gitHttps' + type: String + description: + git_https is the git HTTPS URI for git operations. + output: true + - name: 'api' + type: String + description: | + API is the URI for API access. + output: true + - name: 'initialConfig' + type: NestedObject + description: | + Initial configurations for the repository. + ignore_read: true + properties: + - name: 'defaultBranch' + type: String + description: | + Default branch name of the repository. + - name: 'gitignores' + type: Array + description: | + List of gitignore template names user can choose from. + Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. + item_type: + type: String + - name: 'license' + type: String + description: | + License template name user can choose from. 
+ Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. + - name: 'readme' + type: String + description: | + README template name. + Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. diff --git a/mmv1/products/securesourcemanager/go_product.yaml b/mmv1/products/securesourcemanager/go_product.yaml new file mode 100644 index 000000000000..b5516f1172af --- /dev/null +++ b/mmv1/products/securesourcemanager/go_product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SecureSourceManager' +display_name: 'Secure Source Manager' +versions: + - name: 'ga' + base_url: 'https://securesourcemanager.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml b/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml index 136627c63573..b6fd372d0737 100644 --- a/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml +++ b/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml @@ -47,6 +47,7 @@ async: path: 'error' message: 'message' include_project: true +skip_sweeper: true custom_code: examples: - name: 'vmware_engine_external_access_rule_basic' diff --git a/mmv1/products/vmwareengine/go_ExternalAddress.yaml b/mmv1/products/vmwareengine/go_ExternalAddress.yaml index 74a36a4ba448..c853ea1348e6 100644 --- a/mmv1/products/vmwareengine/go_ExternalAddress.yaml +++ b/mmv1/products/vmwareengine/go_ExternalAddress.yaml @@ -51,9 +51,9 @@ async: path: 'error' message: 'message' include_project: true +skip_sweeper: true custom_code: error_retry_predicates: - - 'transport_tpg.ExternalIpServiceNotActive' examples: - name: 'vmware_engine_external_address_basic' diff --git a/mmv1/templates/terraform/constants/go/netapp_volume_replication.go.tmpl b/mmv1/templates/terraform/constants/go/netapp_volume_replication.go.tmpl index 5037739e6f5e..dcae7d2b141c 100644 --- a/mmv1/templates/terraform/constants/go/netapp_volume_replication.go.tmpl +++ b/mmv1/templates/terraform/constants/go/netapp_volume_replication.go.tmpl @@ -1,5 +1,5 @@ // Custom function to wait for mirrorState target states -func NetAppVolumeReplicationWaitForMirror(d *schema.ResourceData, meta interface{}, targetState string) error { +func NetappVolumeReplicationWaitForMirror(d *schema.ResourceData, meta interface{}, targetState string) error { config := meta.(*transport_tpg.Config) userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { diff --git a/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl b/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl index 17c73cc0954e..fe8d016797b3 100644 --- a/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl +++ b/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl @@ -87,8 +87,7 @@ func modifyNotebooksInstanceState(config *transport_tpg.Config, d *schema.Resour } return res, nil } - -{{- if ne $.Compiler "terraformgoogleconversion-codegen" }} +{{ if ne $.Compiler "terraformgoogleconversion-codegen" }} func waitForNotebooksOperation(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, response map[string]interface{}) error { var opRes map[string]interface{} err := NotebooksOperationWaitTimeWithResponse( diff --git a/mmv1/templates/terraform/constants/netapp_volume_replication.go.erb b/mmv1/templates/terraform/constants/netapp_volume_replication.go.erb index 10ec7b806253..2b4d61481b1d 100644 --- a/mmv1/templates/terraform/constants/netapp_volume_replication.go.erb +++ b/mmv1/templates/terraform/constants/netapp_volume_replication.go.erb @@ -1,5 +1,5 @@ // Custom function to wait for mirrorState target states -func NetAppVolumeReplicationWaitForMirror(d *schema.ResourceData, meta interface{}, targetState string) error { +func NetappVolumeReplicationWaitForMirror(d *schema.ResourceData, meta interface{}, targetState string) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { diff --git a/mmv1/templates/terraform/examples/go/parallelstore_instance_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/parallelstore_instance_basic.tf.tmpl index 55babfe963d7..98fa0d852561 100644 --- a/mmv1/templates/terraform/examples/go/parallelstore_instance_basic.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/parallelstore_instance_basic.tf.tmpl @@ -4,7 +4,8 @@ resource "google_parallelstore_instance" "{{$.PrimaryResourceId}}" { description = "test instance" capacity_gib = 12000 network = google_compute_network.network.name - + file_stripe_level = "FILE_STRIPE_LEVEL_MIN" + directory_stripe_level = "DIRECTORY_STRIPE_LEVEL_MIN" labels = { test = "value" } diff --git a/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl b/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl index 492a9153374d..46a2fba9d942 100644 --- a/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl @@ -12,16 +12,14 @@ resource "google_redis_cluster" "{{$.PrimaryResourceId}}" { redis_configs = { maxmemory-policy = "volatile-ttl" } + deletion_protection_enabled = {{index $.Vars "deletion_protection_enabled"}} + zone_distribution_config { mode = "MULTI_ZONE" } depends_on = [ google_network_connectivity_service_connection_policy.default ] - - lifecycle { - prevent_destroy = {{index $.Vars "prevent_destroy"}} - } } resource "google_network_connectivity_service_connection_policy" "default" { diff --git a/mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl b/mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl index 0d3bcb48f87a..b7289de2d3dc 100644 --- a/mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl @@ -9,13 +9,11 @@ resource "google_redis_cluster" "{{$.PrimaryResourceId}}" { mode = "SINGLE_ZONE" zone = "us-central1-f" } + deletion_protection_enabled = {{index $.Vars "deletion_protection_enabled"}} depends_on = [ google_network_connectivity_service_connection_policy.default ] - lifecycle { - prevent_destroy = {{index $.Vars "prevent_destroy"}} - } } resource "google_network_connectivity_service_connection_policy" "default" { 
diff --git a/mmv1/templates/terraform/expand_property_method.go.tmpl b/mmv1/templates/terraform/expand_property_method.go.tmpl index 27f341dfa1b8..87da148953e8 100644 --- a/mmv1/templates/terraform/expand_property_method.go.tmpl +++ b/mmv1/templates/terraform/expand_property_method.go.tmpl @@ -87,7 +87,7 @@ func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.T {{- if $.IsSet }} v = v.(*schema.Set).List() {{- end }} - {{- if or $.NestedProperties $.AllowEmptyObject }} + {{- if or $.NestedProperties (and ($.IsA "NestedObject") $.AllowEmptyObject) }} l := v.([]interface{}) {{- if $.IsA "Array" }} req := make([]interface{}, 0, len(l)) diff --git a/mmv1/templates/terraform/post_create/go/netapp_volume_replication_post_create.go.tmpl b/mmv1/templates/terraform/post_create/go/netapp_volume_replication_post_create.go.tmpl index b2d44a9c063e..b73fa981dc8a 100644 --- a/mmv1/templates/terraform/post_create/go/netapp_volume_replication_post_create.go.tmpl +++ b/mmv1/templates/terraform/post_create/go/netapp_volume_replication_post_create.go.tmpl @@ -1,6 +1,6 @@ if d.Get("wait_for_mirror").(bool) == true { // Wait for mirrorState=MIRRORED before treating the resource as created - err = NetAppVolumeReplicationWaitForMirror(d, meta, "MIRRORED") + err = NetappVolumeReplicationWaitForMirror(d, meta, "MIRRORED") if err != nil { return fmt.Errorf("Error waiting for volume replication to reach mirror_state==MIRRORED: %s", err) } diff --git a/mmv1/templates/terraform/post_create/netapp_volume_replication_post_create.go.erb b/mmv1/templates/terraform/post_create/netapp_volume_replication_post_create.go.erb index b2d44a9c063e..b73fa981dc8a 100644 --- a/mmv1/templates/terraform/post_create/netapp_volume_replication_post_create.go.erb +++ b/mmv1/templates/terraform/post_create/netapp_volume_replication_post_create.go.erb @@ -1,6 +1,6 @@ if d.Get("wait_for_mirror").(bool) == true { // Wait for mirrorState=MIRRORED before treating the resource as created - err = 
NetAppVolumeReplicationWaitForMirror(d, meta, "MIRRORED") + err = NetappVolumeReplicationWaitForMirror(d, meta, "MIRRORED") if err != nil { return fmt.Errorf("Error waiting for volume replication to reach mirror_state==MIRRORED: %s", err) } diff --git a/mmv1/templates/terraform/post_update/go/netapp_volume_replication_mirror_state.go.tmpl b/mmv1/templates/terraform/post_update/go/netapp_volume_replication_mirror_state.go.tmpl index 5cac7eebb9b2..71724395b40d 100644 --- a/mmv1/templates/terraform/post_update/go/netapp_volume_replication_mirror_state.go.tmpl +++ b/mmv1/templates/terraform/post_update/go/netapp_volume_replication_mirror_state.go.tmpl @@ -81,7 +81,7 @@ if do_change { // If user specified to wait for mirror operations, wait to reach target state if d.Get("wait_for_mirror").(bool) == true { - err = NetAppVolumeReplicationWaitForMirror(d, meta, targetState) + err = NetappVolumeReplicationWaitForMirror(d, meta, targetState) if err != nil { return fmt.Errorf("Error waiting for volume replication to reach mirror_state==%s: %s", targetState, err) } diff --git a/mmv1/templates/terraform/post_update/netapp_volume_replication_mirror_state.go.erb b/mmv1/templates/terraform/post_update/netapp_volume_replication_mirror_state.go.erb index 57a7146702e5..1c6c32387561 100644 --- a/mmv1/templates/terraform/post_update/netapp_volume_replication_mirror_state.go.erb +++ b/mmv1/templates/terraform/post_update/netapp_volume_replication_mirror_state.go.erb @@ -81,7 +81,7 @@ if do_change { // If user specified to wait for mirror operations, wait to reach target state if d.Get("wait_for_mirror").(bool) == true { - err = NetAppVolumeReplicationWaitForMirror(d, meta, targetState) + err = NetappVolumeReplicationWaitForMirror(d, meta, targetState) if err != nil { return fmt.Errorf("Error waiting for volume replication to reach mirror_state==%s: %s", targetState, err) } diff --git a/mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl 
b/mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl index 47a9f3f0d245..75c4c167ddfa 100644 --- a/mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl +++ b/mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" ) -func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { +func TestAccNetappStoragePool_storagePoolCreateExample_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -23,7 +23,7 @@ func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccNetappstoragePool_storagePoolCreateExample_full(context), + Config: testAccNetappStoragePool_storagePoolCreateExample_full(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -32,7 +32,7 @@ func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappstoragePool_storagePoolCreateExample_update(context), + Config: testAccNetappStoragePool_storagePoolCreateExample_update(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -44,7 +44,7 @@ func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { }) } -func testAccNetappstoragePool_storagePoolCreateExample_full(context map[string]interface{}) string { +func testAccNetappStoragePool_storagePoolCreateExample_full(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_network" "peering_network" { @@ -86,7 +86,7 @@ resource "google_netapp_storage_pool" "test_pool" { `, context) } -func testAccNetappstoragePool_storagePoolCreateExample_update(context map[string]interface{}) string { +func 
testAccNetappStoragePool_storagePoolCreateExample_update(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_network" "peering_network" { @@ -129,7 +129,7 @@ resource "google_netapp_storage_pool" "test_pool" { } {{ if ne $.TargetVersionName `ga` -}} -func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *testing.T) { +func TestAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_update(t *testing.T) { context := map[string]interface{}{ "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), "random_suffix": acctest.RandString(t, 10), @@ -138,13 +138,13 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckNetappstoragePoolDestroyProducer(t), + CheckDestroy: testAccCheckNetappStoragePoolDestroyProducer(t), ExternalProviders: map[string]resource.ExternalProvider{ "time": {}, }, Steps: []resource.TestStep{ { - Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context), + Config: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_full(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -153,8 +153,8 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context), - Check: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins(), + Config: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context), + Check: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins(), }, { 
ResourceName: "google_netapp_storage_pool.test_pool", @@ -163,7 +163,7 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context), + Config: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -175,7 +175,7 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes }) } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context map[string]interface{}) string { +func testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_full(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "test_pool" { provider = google-beta @@ -200,7 +200,7 @@ data "google_compute_network" "default" { `, context) } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context map[string]interface{}) string { +func testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "test_pool" { provider = google-beta @@ -225,7 +225,7 @@ data "google_compute_network" "default" { `, context) } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins() resource.TestCheckFunc { +func testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins() resource.TestCheckFunc { return func(s *terraform.State) error { // wait 5 minutes before executing the switchback due to api zone switch issues time.Sleep(5 * time.Minute) @@ -233,7 +233,7 @@ func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins( } } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context 
map[string]interface{}) string { +func testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "test_pool" { provider = google-beta diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_sweeper.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_sweeper.go index d2144f728bb5..150f1ef9120a 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_sweeper.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_sweeper.go @@ -13,12 +13,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappactiveDirectory", testSweepNetappactiveDirectory) + sweeper.AddTestSweepers("NetappActiveDirectory", testSweepNetappActiveDirectory) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappactiveDirectory(region string) error { - resourceName := "NetappactiveDirectory" +func testSweepNetappActiveDirectory(region string) error { + resourceName := "NetappActiveDirectory" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go index cebdcdd2ec9e..a22b767b9008 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" ) -func TestAccNetappactiveDirectory_activeDirectory_FullUpdate(t *testing.T) { +func TestAccNetappActiveDirectory_activeDirectory_FullUpdate(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -20,7 +20,7 
@@ func TestAccNetappactiveDirectory_activeDirectory_FullUpdate(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccNetappactiveDirectory_activeDirectoryCreateExample_Full(context), + Config: testAccNetappActiveDirectory_activeDirectoryCreateExample_Full(context), }, { ResourceName: "google_netapp_active_directory.test_active_directory_full", @@ -29,7 +29,7 @@ func TestAccNetappactiveDirectory_activeDirectory_FullUpdate(t *testing.T) { ImportStateVerifyIgnore: []string{"location", "name", "pass", "labels", "terraform_labels"}, }, { - Config: testAccNetappactiveDirectory_activeDirectoryCreateExample_Update(context), + Config: testAccNetappActiveDirectory_activeDirectoryCreateExample_Update(context), }, { ResourceName: "google_netapp_active_directory.test_active_directory_full", @@ -41,7 +41,7 @@ func TestAccNetappactiveDirectory_activeDirectory_FullUpdate(t *testing.T) { }) } -func testAccNetappactiveDirectory_activeDirectoryCreateExample_Full(context map[string]interface{}) string { +func testAccNetappActiveDirectory_activeDirectoryCreateExample_Full(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_active_directory" "test_active_directory_full" { name = "tf-test-test-active-directory-full%{random_suffix}" @@ -70,7 +70,7 @@ resource "google_netapp_active_directory" "test_active_directory_full" { `, context) } -func testAccNetappactiveDirectory_activeDirectoryCreateExample_Update(context map[string]interface{}) string { +func testAccNetappActiveDirectory_activeDirectoryCreateExample_Update(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_active_directory" "test_active_directory_full" { name = "tf-test-test-active-directory-full%{random_suffix}" diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_sweeper.go 
b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_sweeper.go index 0525bb934407..d270cfe739a7 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_sweeper.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_sweeper.go @@ -13,12 +13,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappbackupPolicy", testSweepNetappbackupPolicy) + sweeper.AddTestSweepers("NetappBackupPolicy", testSweepNetappBackupPolicy) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappbackupPolicy(region string) error { - resourceName := "NetappbackupPolicy" +func testSweepNetappBackupPolicy(region string) error { + resourceName := "NetappBackupPolicy" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_test.go index 181b39bb949f..14aa24ae1bfa 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_policy_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" ) -func TestAccNetappbackupPolicy_netappBackupPolicyFullExample_update(t *testing.T) { +func TestAccNetappBackupPolicy_NetappBackupPolicyFullExample_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -18,10 +18,10 @@ func TestAccNetappbackupPolicy_netappBackupPolicyFullExample_update(t *testing.T acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckNetappbackupPolicyDestroyProducer(t), + CheckDestroy: testAccCheckNetappBackupPolicyDestroyProducer(t), Steps: 
[]resource.TestStep{ { - Config: testAccNetappbackupPolicy_netappBackupPolicyFullExample_basic(context), + Config: testAccNetappBackupPolicy_NetappBackupPolicyFullExample_basic(context), }, { ResourceName: "google_netapp_backup_policy.test_backup_policy_full", @@ -30,7 +30,7 @@ func TestAccNetappbackupPolicy_netappBackupPolicyFullExample_update(t *testing.T ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappbackupPolicy_netappBackupPolicyFullExample_updates(context), + Config: testAccNetappBackupPolicy_NetappBackupPolicyFullExample_updates(context), }, { ResourceName: "google_netapp_backup_policy.test_backup_policy_full", @@ -38,7 +38,7 @@ func TestAccNetappbackupPolicy_netappBackupPolicyFullExample_update(t *testing.T ImportStateVerify: true, ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappbackupPolicy_netappBackupPolicyFullExample_disable(context), + Config: testAccNetappBackupPolicy_NetappBackupPolicyFullExample_disable(context), }, { ResourceName: "google_netapp_backup_policy.test_backup_policy_full", @@ -51,7 +51,7 @@ func TestAccNetappbackupPolicy_netappBackupPolicyFullExample_update(t *testing.T } // Setup minimal policy -func testAccNetappbackupPolicy_netappBackupPolicyFullExample_basic(context map[string]interface{}) string { +func testAccNetappBackupPolicy_NetappBackupPolicyFullExample_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_backup_policy" "test_backup_policy_full" { name = "tf-test-test-backup-policy-full%{random_suffix}" @@ -64,7 +64,7 @@ resource "google_netapp_backup_policy" "test_backup_policy_full" { } // Update all fields -func testAccNetappbackupPolicy_netappBackupPolicyFullExample_updates(context map[string]interface{}) string { +func testAccNetappBackupPolicy_NetappBackupPolicyFullExample_updates(context map[string]interface{}) string { return acctest.Nprintf(` 
resource "google_netapp_backup_policy" "test_backup_policy_full" { name = "tf-test-test-backup-policy-full%{random_suffix}" @@ -82,7 +82,7 @@ resource "google_netapp_backup_policy" "test_backup_policy_full" { } // test disabling the policy -func testAccNetappbackupPolicy_netappBackupPolicyFullExample_disable(context map[string]interface{}) string { +func testAccNetappBackupPolicy_NetappBackupPolicyFullExample_disable(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_backup_policy" "test_backup_policy_full" { name = "tf-test-test-backup-policy-full%{random_suffix}" diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_sweeper.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_sweeper.go index 30eed9817dc5..d8df15b8c9df 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_sweeper.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_sweeper.go @@ -13,12 +13,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("Netappbackup", testSweepNetappbackup) + sweeper.AddTestSweepers("NetappBackup", testSweepNetappBackup) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappbackup(region string) error { - resourceName := "Netappbackup" +func testSweepNetappBackup(region string) error { + resourceName := "NetappBackup" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go index 26da78976abf..e2cc27b4b3c4 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" ) -func 
TestAccNetappbackup_netappBackupFull_update(t *testing.T) { +func TestAccNetappBackup_NetappBackupFull_update(t *testing.T) { context := map[string]interface{}{ "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), "random_suffix": acctest.RandString(t, 10), @@ -19,10 +19,10 @@ func TestAccNetappbackup_netappBackupFull_update(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckNetappbackupDestroyProducer(t), + CheckDestroy: testAccCheckNetappBackupDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccNetappbackup_netappBackupFromVolumeSnapshot(context), + Config: testAccNetappBackup_NetappBackupFromVolumeSnapshot(context), }, { ResourceName: "google_netapp_backup.test_backup", @@ -31,7 +31,7 @@ func TestAccNetappbackup_netappBackupFull_update(t *testing.T) { ImportStateVerifyIgnore: []string{"labels", "location", "name", "terraform_labels", "vault_name"}, }, { - Config: testAccNetappbackup_netappBackupFromVolumeSnapshot_update(context), + Config: testAccNetappBackup_NetappBackupFromVolumeSnapshot_update(context), }, { ResourceName: "google_netapp_backup.test_backup", @@ -43,7 +43,7 @@ func TestAccNetappbackup_netappBackupFull_update(t *testing.T) { }) } -func testAccNetappbackup_netappBackupFromVolumeSnapshot(context map[string]interface{}) string { +func testAccNetappBackup_NetappBackupFromVolumeSnapshot(context map[string]interface{}) string { return acctest.Nprintf(` data "google_compute_network" "default" { name = "%{network_name}" @@ -102,7 +102,7 @@ resource "google_netapp_backup" "test_backup" { `, context) } -func testAccNetappbackup_netappBackupFromVolumeSnapshot_update(context map[string]interface{}) string { +func testAccNetappBackup_NetappBackupFromVolumeSnapshot_update(context 
map[string]interface{}) string { return acctest.Nprintf(` data "google_compute_network" "default" { name = "%{network_name}" diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_sweeper.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_sweeper.go index 2e5ee5a58f1f..9a9b3ca07dd2 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_sweeper.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_sweeper.go @@ -13,12 +13,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappbackupVault", testSweepNetappbackupVault) + sweeper.AddTestSweepers("NetappBackupVault", testSweepNetappBackupVault) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappbackupVault(region string) error { - resourceName := "NetappbackupVault" +func testSweepNetappBackupVault(region string) error { + resourceName := "NetappBackupVault" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_test.go index 4851123c786e..c0420fd24f5f 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_vault_test.go @@ -13,7 +13,7 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func TestAccNetappbackupVault_netappBackupVaultExample_update(t *testing.T) { +func TestAccNetappBackupVault_NetappBackupVaultExample_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -25,7 +25,7 @@ func TestAccNetappbackupVault_netappBackupVaultExample_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - 
Config: testAccNetappbackupVault_netappBackupVaultExample_basic(context), + Config: testAccNetappBackupVault_NetappBackupVaultExample_basic(context), }, { ResourceName: "google_netapp_backup_vault.test_backup_vault", @@ -34,7 +34,7 @@ func TestAccNetappbackupVault_netappBackupVaultExample_update(t *testing.T) { ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappbackupVault_netappBackupVaultExample_update(context), + Config: testAccNetappBackupVault_NetappBackupVaultExample_update(context), }, { ResourceName: "google_netapp_backup_vault.test_backup_vault", @@ -46,7 +46,7 @@ func TestAccNetappbackupVault_netappBackupVaultExample_update(t *testing.T) { }) } -func testAccNetappbackupVault_netappBackupVaultExample_basic(context map[string]interface{}) string { +func testAccNetappBackupVault_NetappBackupVaultExample_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_backup_vault" "test_backup_vault" { name = "tf-test-test-backup-vault%{random_suffix}" @@ -55,7 +55,7 @@ resource "google_netapp_backup_vault" "test_backup_vault" { `, context) } -func testAccNetappbackupVault_netappBackupVaultExample_update(context map[string]interface{}) string { +func testAccNetappBackupVault_NetappBackupVaultExample_update(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_backup_vault" "test_backup_vault" { name = "tf-test-test-backup-vault%{random_suffix}" @@ -69,7 +69,7 @@ resource "google_netapp_backup_vault" "test_backup_vault" { `, context) } -func testAccCheckNetappbackupVaultDestroyProducer(t *testing.T) func(s *terraform.State) error { +func testAccCheckNetappBackupVaultDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { if rs.Type != "google_netapp_backup_vault" { @@ -100,7 +100,7 @@ func testAccCheckNetappbackupVaultDestroyProducer(t 
*testing.T) func(s *terrafor UserAgent: config.UserAgent, }) if err == nil { - return fmt.Errorf("NetappbackupVault still exists at %s", url) + return fmt.Errorf("NetappBackupVault still exists at %s", url) } } diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_sweeper.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_sweeper.go index fad9f8a8c44d..cb414b719daf 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_sweeper.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_sweeper.go @@ -13,12 +13,12 @@ import ( ) func init() { - sweeper.AddTestSweepers("NetappstoragePool", testSweepNetappstoragePool) + sweeper.AddTestSweepers("NetappStoragePool", testSweepNetappStoragePool) } // At the time of writing, the CI only passes us-central1 as the region -func testSweepNetappstoragePool(region string) error { - resourceName := "NetappstoragePool" +func testSweepNetappStoragePool(region string) error { + resourceName := "NetappStoragePool" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) config, err := sweeper.SharedConfigForRegion(region) diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb b/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb index a165145b1910..22db65c5e0f5 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" ) -func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { +func TestAccNetappStoragePool_storagePoolCreateExample_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -24,7 +24,7 @@ func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccNetappstoragePool_storagePoolCreateExample_full(context), + Config: testAccNetappStoragePool_storagePoolCreateExample_full(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -33,7 +33,7 @@ func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappstoragePool_storagePoolCreateExample_update(context), + Config: testAccNetappStoragePool_storagePoolCreateExample_update(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -45,7 +45,7 @@ func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { }) } -func testAccNetappstoragePool_storagePoolCreateExample_full(context map[string]interface{}) string { +func testAccNetappStoragePool_storagePoolCreateExample_full(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_network" "peering_network" { @@ -87,7 +87,7 @@ resource "google_netapp_storage_pool" "test_pool" { `, context) } -func testAccNetappstoragePool_storagePoolCreateExample_update(context map[string]interface{}) string { +func testAccNetappStoragePool_storagePoolCreateExample_update(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_network" "peering_network" { @@ -130,7 +130,7 @@ resource "google_netapp_storage_pool" "test_pool" { } <% unless version == 'ga' -%> -func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *testing.T) { +func TestAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_update(t *testing.T) { context := map[string]interface{}{ "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), "random_suffix": acctest.RandString(t, 10), @@ 
-139,13 +139,13 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckNetappstoragePoolDestroyProducer(t), + CheckDestroy: testAccCheckNetappStoragePoolDestroyProducer(t), ExternalProviders: map[string]resource.ExternalProvider{ "time": {}, }, Steps: []resource.TestStep{ { - Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context), + Config: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_full(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -154,8 +154,8 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context), - Check: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins(), + Config: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context), + Check: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins(), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -164,7 +164,7 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, }, { - Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context), + Config: testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context), }, { ResourceName: "google_netapp_storage_pool.test_pool", @@ -176,7 +176,7 @@ func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *tes }) } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context map[string]interface{}) string { +func 
testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_full(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "test_pool" { provider = google-beta @@ -201,7 +201,7 @@ data "google_compute_network" "default" { `, context) } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context map[string]interface{}) string { +func testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "test_pool" { provider = google-beta @@ -226,7 +226,7 @@ data "google_compute_network" "default" { `, context) } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins() resource.TestCheckFunc { +func testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins() resource.TestCheckFunc { return func(s *terraform.State) error { // wait 5 minutes before executing the switchback due to api zone switch issues time.Sleep(5 * time.Minute) @@ -234,7 +234,7 @@ func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins( } } -func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context map[string]interface{}) string { +func testAccNetappStoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_netapp_storage_pool" "test_pool" { provider = google-beta diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_replication_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_replication_test.go index bd612eba155d..626adab8a2dc 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_replication_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_replication_test.go @@ -11,7 +11,7 @@ import ( 
"github.com/hashicorp/terraform-provider-google/google/acctest" ) -func TestAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update(t *testing.T) { +func TestAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -25,7 +25,7 @@ func TestAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update( CheckDestroy: testAccCheckNetappVolumeReplicationDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_basic(context), + Config: testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_basic(context), }, { ResourceName: "google_netapp_volume_replication.test_replication", @@ -34,7 +34,7 @@ func TestAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update( ImportStateVerifyIgnore: []string{"destination_volume_parameters", "location", "volume_name", "name", "delete_destination_volume", "replication_enabled", "force_stopping", "wait_for_mirror", "labels", "terraform_labels"}, }, { - Config: testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_stop(context), + Config: testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_stop(context), }, { ResourceName: "google_netapp_volume_replication.test_replication", @@ -43,7 +43,7 @@ func TestAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update( ImportStateVerifyIgnore: []string{"destination_volume_parameters", "location", "volume_name", "name", "delete_destination_volume", "replication_enabled", "force_stopping", "wait_for_mirror", "labels", "terraform_labels"}, }, { - Config: testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_resume(context), + Config: testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_resume(context), }, { ResourceName: "google_netapp_volume_replication.test_replication", @@ -52,7 +52,7 @@ func 
TestAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update( ImportStateVerifyIgnore: []string{"destination_volume_parameters", "location", "volume_name", "name", "delete_destination_volume", "replication_enabled", "force_stopping", "wait_for_mirror", "labels", "terraform_labels"}, }, { - Config: testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update(context), + Config: testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_update(context), }, { ResourceName: "google_netapp_volume_replication.test_replication", @@ -65,7 +65,7 @@ func TestAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update( } // Basic replication -func testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_basic(context map[string]interface{}) string { +func testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_basic(context map[string]interface{}) string { return acctest.Nprintf(` data "google_compute_network" "default" { name = "%{network_name}" @@ -120,7 +120,7 @@ resource "google_netapp_volume_replication" "test_replication" { } // Update parameters -func testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_update(context map[string]interface{}) string { +func testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_update(context map[string]interface{}) string { return acctest.Nprintf(` data "google_compute_network" "default" { name = "%{network_name}" @@ -182,7 +182,7 @@ resource "google_netapp_volume_replication" "test_replication" { } // Stop replication -func testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_stop(context map[string]interface{}) string { +func testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_stop(context map[string]interface{}) string { return acctest.Nprintf(` data "google_compute_network" "default" { name = "%{network_name}" @@ -244,7 +244,7 @@ resource "google_netapp_volume_replication" "test_replication" { } // 
resume replication -func testAccNetappVolumeReplication_netappVolumeReplicationCreateExample_resume(context map[string]interface{}) string { +func testAccNetappVolumeReplication_NetappVolumeReplicationCreateExample_resume(context map[string]interface{}) string { return acctest.Nprintf(` data "google_compute_network" "default" { name = "%{network_name}" diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go index 84c3070c51b7..892cb40bc529 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go @@ -18,7 +18,7 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func TestAccNetappVolume_netappVolumeBasicExample_update(t *testing.T) { +func TestAccNetappVolume_NetappVolumeBasicExample_update(t *testing.T) { context := map[string]interface{}{ "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), "random_suffix": acctest.RandString(t, 10), diff --git a/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl index 97d45e0d2549..6009bc618f23 100644 --- a/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl @@ -11,6 +11,7 @@ import ( ) func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { + t.Parallel() name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) @@ -22,7 +23,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, 
replicaCount: 1, shardCount: 3, preventDestroy: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -32,7 +33,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), }, }, }) @@ -52,7 +53,7 @@ func TestAccRedisCluster_createClusterWithZoneDistribution(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), }, { ResourceName: "google_redis_cluster.test", @@ -62,7 +63,7 @@ func TestAccRedisCluster_createClusterWithZoneDistribution(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), }, }, }) @@ -81,7 +82,7 @@ 
func TestAccRedisCluster_updateReplicaCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -91,7 +92,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 2 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -99,13 +100,9 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"psc_configs"}, }, - { - // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), - }, { // update replica count to 0 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -115,7 +112,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: 
"MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), }, }, }) @@ -134,7 +131,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with shard count 3 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -144,7 +141,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // update shard count to 5 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -154,7 +151,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), }, }, }) @@ -213,11 +210,51 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { }) } +// Validate that deletion protection enabled/disabled cluster is created updated +func TestAccRedisCluster_createUpdateDeletionProtection(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with deletion protection set to false + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // update deletion protection to true + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // update deletion protection to false and delete the cluster + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + }, + + }, + }) +} + type ClusterParams struct { name string replicaCount int shardCount int - preventDestroy bool + deletionProtectionEnabled bool nodeType string redisConfigs map[string]string zoneDistributionMode string @@ -225,13 +262,6 @@ type ClusterParams struct { } func createOrUpdateRedisCluster(params *ClusterParams) string { - lifecycleBlock := "" - if params.preventDestroy { - lifecycleBlock = ` - lifecycle { - prevent_destroy = true - }` - } var strBuilder strings.Builder for key, value := range params.redisConfigs { strBuilder.WriteString(fmt.Sprintf("%s = \"%s\"\n", key, value)) @@ -254,6 +284,7 @@ resource "google_redis_cluster" "test" { replica_count = %d shard_count = %d node_type = "%s" + deletion_protection_enabled = %v region = "us-central1" psc_configs { network 
= google_compute_network.producer_net.id @@ -263,9 +294,8 @@ resource "google_redis_cluster" "test" { } %s depends_on = [ - google_network_connectivity_service_connection_policy.default - ] - %s + google_network_connectivity_service_connection_policy.default + ] } resource "google_network_connectivity_service_connection_policy" "default" { @@ -293,7 +323,7 @@ resource "google_compute_network" "producer_net" { name = "%s" auto_create_subnetworks = false } -`, params.name, params.replicaCount, params.shardCount, params.nodeType, strBuilder.String(), zoneDistributionConfigBlock, lifecycleBlock, params.name, params.name, params.name) +`, params.name, params.replicaCount, params.shardCount, params.nodeType, params.deletionProtectionEnabled, strBuilder.String(), zoneDistributionConfigBlock, params.name, params.name, params.name) } {{ end }} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl index 4e005bdafe2c..d1a304e96938 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl @@ -3,6 +3,7 @@ package resourcemanager import ( "fmt" "log" + "regexp" "strings" "time" @@ -331,6 +332,11 @@ func disableServiceUsageProjectService(service, project string, d *schema.Resour ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.ServiceUsageServiceBeingActivated}, }) if err != nil { + {{- if not (eq $.TargetVersionName "ga") }} + if res, _ := regexp.MatchString("COMMON_SU_SERVICE_HAS_USAGE", err.Error()); res { + return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, strings.Replace(err.Error(), "check_if_service_has_usage=SKIP", "check_if_service_has_usage_on_destroy=false", -1)) + } + {{- end }} return fmt.Errorf("Error 
disabling service %q for project %q: %v", service, project, err) } return nil From 13a85741556a50a5be628e6241a4b64315dd5f33 Mon Sep 17 00:00:00 2001 From: abd-goog <156919569+abd-goog@users.noreply.github.com> Date: Tue, 3 Sep 2024 21:56:05 +0530 Subject: [PATCH 17/60] Add `tags` field to Project resource (#11440) --- .../terraform/acctest/bootstrap_test_utils.go | 138 ++++++++++++++++++ .../resource_google_project.go | 12 ++ .../resource_google_project_test.go | 72 +++++++++ .../docs/r/google_project.html.markdown | 15 ++ 4 files changed, 237 insertions(+) diff --git a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go index b169e89456d3..0a4869b32dd6 100644 --- a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go +++ b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go @@ -1304,3 +1304,141 @@ func SetupProjectsAndGetAccessToken(org, billing, pid, service string, config *t return accessToken, nil } + +const sharedTagKeyPrefix = "tf-bootstrap-tagkey" + +func BootstrapSharedTestTagKey(t *testing.T, testId string) string { + org := envvar.GetTestOrgFromEnv(t) + sharedTagKey := fmt.Sprintf("%s-%s", sharedTagKeyPrefix, testId) + tagKeyName := fmt.Sprintf("%s/%s", org, sharedTagKey) + + config := BootstrapConfig(t) + if config == nil { + return "" + } + + log.Printf("[DEBUG] Getting shared test tag key %q", sharedTagKey) + getURL := fmt.Sprintf("%stagKeys/namespaced?name=%s", config.TagsBasePath, tagKeyName) + _, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: getURL, + UserAgent: config.UserAgent, + Timeout: 2 * time.Minute, + }) + if err != nil && transport_tpg.IsGoogleApiErrorWithCode(err, 403) { + log.Printf("[DEBUG] TagKey %q not found, bootstrapping", sharedTagKey) + tagKeyObj := map[string]interface{}{ + "parent": "organizations/" + org, + "shortName": sharedTagKey, + "description": 
"Bootstrapped tag key for Terraform Acceptance testing", + } + + _, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: config.Project, + RawURL: config.TagsBasePath + "tagKeys/", + UserAgent: config.UserAgent, + Body: tagKeyObj, + Timeout: 10 * time.Minute, + }) + if err != nil { + t.Fatalf("Error bootstrapping shared tag key %q: %s", sharedTagKey, err) + } + + log.Printf("[DEBUG] Waiting for shared tag key creation to finish") + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: getURL, + UserAgent: config.UserAgent, + Timeout: 2 * time.Minute, + }) + + if err != nil { + t.Fatalf("Error getting shared tag key %q: %s", sharedTagKey, err) + } + + return sharedTagKey +} + +const sharedTagValuePrefix = "tf-bootstrap-tagvalue" + +func BootstrapSharedTestTagValue(t *testing.T, testId string, tagKey string) string { + org := envvar.GetTestOrgFromEnv(t) + sharedTagValue := fmt.Sprintf("%s-%s", sharedTagValuePrefix, testId) + tagKeyName := fmt.Sprintf("%s/%s", org, tagKey) + tagValueName := fmt.Sprintf("%s/%s", tagKeyName, sharedTagValue) + + config := BootstrapConfig(t) + if config == nil { + return "" + } + + log.Printf("[DEBUG] Getting shared test tag value %q", sharedTagValue) + getURL := fmt.Sprintf("%stagValues/namespaced?name=%s", config.TagsBasePath, tagValueName) + _, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: getURL, + UserAgent: config.UserAgent, + Timeout: 2 * time.Minute, + }) + if err != nil && transport_tpg.IsGoogleApiErrorWithCode(err, 403) { + log.Printf("[DEBUG] TagValue %q not found, bootstrapping", sharedTagValue) + log.Printf("[DEBUG] Fetching permanent id for tagkey %s", tagKeyName) + tagKeyGetURL := fmt.Sprintf("%stagKeys/namespaced?name=%s", config.TagsBasePath, tagKeyName) + tagKeyResponse, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: tagKeyGetURL, + UserAgent: config.UserAgent, + Timeout: 2 * time.Minute, + }) + if err != nil { + t.Fatalf("Error getting tag key id for %s : %s", tagKeyName, err) + } + tagKeyObj := map[string]interface{}{ + "parent": tagKeyResponse["name"].(string), + "shortName": sharedTagValue, + "description": "Bootstrapped tag value for Terraform Acceptance testing", + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: config.Project, + RawURL: config.TagsBasePath + "tagValues/", + UserAgent: config.UserAgent, + Body: tagKeyObj, + Timeout: 10 * time.Minute, + }) + if err != nil { + t.Fatalf("Error bootstrapping shared tag value %q: %s", sharedTagValue, err) + } + + log.Printf("[DEBUG] Waiting for shared tag value creation to finish") + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: getURL, + UserAgent: config.UserAgent, + Timeout: 2 * time.Minute, + }) + + if err != nil { + t.Fatalf("Error getting shared tag value %q: %s", sharedTagValue, err) + } + + return sharedTagValue +} diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_google_project.go b/mmv1/third_party/terraform/services/resourcemanager/resource_google_project.go index ca151fe79b6d..077152128a2f 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/resource_google_project.go +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_google_project.go @@ -133,6 +133,14 @@ func ResourceGoogleProject() *schema.Resource { Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, Elem: &schema.Schema{Type: schema.TypeString}, }, + + "tags": { + Type: schema.TypeMap, + Optional: true, 
+ ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty.`, + }, }, UseJSONNumber: true, } @@ -166,6 +174,10 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error project.Labels = tpgresource.ExpandEffectiveLabels(d) } + if _, ok := d.GetOk("tags"); ok { + project.Tags = tpgresource.ExpandStringMap(d, "tags") + } + var op *cloudresourcemanager.Operation err = transport_tpg.Retry(transport_tpg.RetryOptions{ RetryFunc: func() (reqErr error) { diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_test.go b/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_test.go index e32f0db67e2c..bca4d08b667c 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_test.go @@ -234,6 +234,46 @@ func TestAccProject_migrateParent(t *testing.T) { }) } +// Test that a Project resource can be created with tags +func TestAccProject_tags(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("%s-%d", TestPrefix, acctest.RandInt(t)) + tagKey := acctest.BootstrapSharedTestTagKey(t, "crm-projects-tagkey") + tagValue := acctest.BootstrapSharedTestTagValue(t, "crm-projects-tagvalue", tagKey) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccProject_tags(pid, org, map[string]string{org + "/" + tagKey: tagValue}), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckGoogleProjectExists("google_project.acceptance", pid), + ), + }, + // Make sure import supports tags + { + ResourceName: "google_project.acceptance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"tags", "deletion_policy"}, // we don't read tags back + }, + // Update tags tries to replace project but fails due to deletion policy + { + Config: testAccProject_tags(pid, org, map[string]string{}), + ExpectError: regexp.MustCompile("deletion_policy"), + }, + { + Config: testAccProject_tagsAllowDestroy(pid, org, map[string]string{org + "/" + tagKey: tagValue}), + }, + }, + }) +} + func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[r] @@ -553,3 +593,35 @@ resource "google_folder" "folder1" { } `, pid, pid, org, folderName, org) } + +func testAccProject_tags(pid, org string, tags map[string]string) string { + r := fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" + tags = {`, pid, pid, org) + + l := "" + for key, value := range tags { + l += fmt.Sprintf("%q = %q\n", key, value) + } + l += fmt.Sprintf("}\n}") + return r + l +} + +func testAccProject_tagsAllowDestroy(pid, org string, tags map[string]string) string { + r := fmt.Sprintf(`resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" + deletion_policy = "DELETE" + tags = {`, pid, pid, org) + l := "" + for key, value := range tags { + l += fmt.Sprintf("%q = %q\n", key, value) + } + + l += fmt.Sprintf("}\n}") + return r + l +} diff --git a/mmv1/third_party/terraform/website/docs/r/google_project.html.markdown b/mmv1/third_party/terraform/website/docs/r/google_project.html.markdown index 6d45074bb2d5..fea0203890ea 100644 --- a/mmv1/third_party/terraform/website/docs/r/google_project.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/google_project.html.markdown @@ -20,6 
+20,8 @@ doc for more information. ~> It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. +~> It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + To get more information about projects, see: * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -51,6 +53,17 @@ resource "google_folder" "department1" { } ``` +To create a project with a tag + +```hcl +resource "google_project" "my_project" { + name = "My Project" + project_id = "your-project-id" + org_id = "1234567" + tags = {"1234567/env":"staging"} +} +``` + ## Argument Reference The following arguments are supported: @@ -100,6 +113,8 @@ The following arguments are supported: to be abandoned rather than deleted, i.e., the Terraform resource can be deleted without deleting the Project via the Google API. Possible values are: "PREVENT", "ABANDON", "DELETE". Default value is `PREVENT`. +* `tags` - (Optional) A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. 
+ ## Attributes Reference In addition to the arguments listed above, the following computed attributes are From 5cdc1d834dc60f3d6d747a197e99b007f0466e24 Mon Sep 17 00:00:00 2001 From: Mauricio Alvarez Leon <65101411+BBBmau@users.noreply.github.com> Date: Tue, 3 Sep 2024 13:16:13 -0700 Subject: [PATCH 18/60] `storage`: fix `google_storage_bucket` 429 Error (#11550) --- .../terraform/services/storage/resource_storage_bucket.go.erb | 1 + 1 file changed, 1 insertion(+) diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb index 8df5c23a81c8..49df5d889a8c 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb @@ -709,6 +709,7 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error return err }, Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429RetryableQuotaError}, }) if err != nil { From b39a08d38486870b16887e4ba8f323c166b04254 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 3 Sep 2024 15:37:20 -0500 Subject: [PATCH 19/60] go rewrite - initial concurrency (#11627) --- mmv1/main.go | 151 +++++++++++++++++++++---------------- mmv1/provider/terraform.go | 2 +- 2 files changed, 88 insertions(+), 65 deletions(-) diff --git a/mmv1/main.go b/mmv1/main.go index 851f05737390..fbedea96bd5d 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -9,6 +9,7 @@ import ( "path/filepath" "sort" "strings" + "sync" "time" "golang.org/x/exp/slices" @@ -17,6 +18,8 @@ import ( "github.com/GoogleCloudPlatform/magic-modules/mmv1/provider" ) +var wg sync.WaitGroup + // TODO Q2: additional flags // Example usage: --output $GOPATH/src/github.com/terraform-providers/terraform-provider-google-beta @@ -100,98 +103,118 @@ func main() { // Building compute takes a long time 
and can't be parallelized within the product // so lets build it first sort.Slice(allProductFiles, func(i int, j int) bool { - if allProductFiles[i] == "compute" { + if allProductFiles[i] == "products/compute" { return true } return false }) + var providerToGenerate *provider.Terraform + var productsForVersion []*api.Product + + ch := make(chan string, len(allProductFiles)) + for _, pf := range allProductFiles { + ch <- pf + } + + for i := 0; i < len(allProductFiles); i++ { + wg.Add(1) + go GenerateProduct(ch, providerToGenerate, &productsForVersion, startTime, productsToGenerate, generateCode, generateDocs) + } + wg.Wait() + + close(ch) + + slices.SortFunc(productsForVersion, func(p1, p2 *api.Product) int { + return strings.Compare(strings.ToLower(p1.Name), strings.ToLower(p2.Name)) + }) + // In order to only copy/compile files once per provider this must be called outside // of the products loop. This will get called with the provider from the final iteration // of the loop - var providerToGenerate *provider.Terraform - var productsForVersion []*api.Product - for _, productName := range allProductFiles { - productYamlPath := path.Join(productName, "go_product.yaml") + providerToGenerate = provider.NewTerraform(productsForVersion[0], *version, startTime) - // TODO Q2: uncomment the error check that if the product.yaml exists for each product - // after Go-converted product.yaml files are complete for all products - // if _, err := os.Stat(productYamlPath); errors.Is(err, os.ErrNotExist) { - // log.Fatalf("%s does not contain a product.yaml file", productName) - // } + providerToGenerate.CopyCommonFiles(*outputPath, generateCode, generateDocs) + + log.Printf("Compiling common files for terraform") + if generateCode { + providerToGenerate.CompileCommonFiles(*outputPath, productsForVersion, "") // TODO Q2: product overrides + } +} - if _, err := os.Stat(productYamlPath); err == nil { - var resources []*api.Resource = make([]*api.Resource, 0) +func 
GenerateProduct(productChannel chan string, providerToGenerate *provider.Terraform, productsForVersion *[]*api.Product, startTime time.Time, productsToGenerate []string, generateCode, generateDocs bool) { - productApi := &api.Product{} - api.Compile(productYamlPath, productApi) + defer wg.Done() + productName := <-productChannel - if !productApi.ExistsAtVersionOrLower(*version) { - log.Printf("%s does not have a '%s' version, skipping", productName, *version) - continue - } + productYamlPath := path.Join(productName, "go_product.yaml") - resourceFiles, err := filepath.Glob(fmt.Sprintf("%s/*", productName)) - if err != nil { - log.Fatalf("Cannot get resources files: %v", err) - } - for _, resourceYamlPath := range resourceFiles { - if filepath.Base(resourceYamlPath) == "product.yaml" || filepath.Ext(resourceYamlPath) != ".yaml" { - continue - } - - // Prepend "go_" to the Go yaml files' name to distinguish with the ruby yaml files - if filepath.Base(resourceYamlPath) == "go_product.yaml" || !strings.HasPrefix(filepath.Base(resourceYamlPath), "go_") { - continue - } - - resource := &api.Resource{} - api.Compile(resourceYamlPath, resource) - - resource.TargetVersionName = *version - resource.Properties = resource.AddLabelsRelatedFields(resource.PropertiesWithExcluded(), nil) - resource.SetDefault(productApi) - resource.Validate() - resources = append(resources, resource) - } + // TODO Q2: uncomment the error check that if the product.yaml exists for each product + // after Go-converted product.yaml files are complete for all products + // if _, err := os.Stat(productYamlPath); errors.Is(err, os.ErrNotExist) { + // log.Fatalf("%s does not contain a product.yaml file", productName) + // } - // TODO Q2: override resources + // TODO Q2: product overrides - // Sort resources by name - sort.Slice(resources, func(i, j int) bool { - return resources[i].Name < resources[j].Name - }) + if _, err := os.Stat(productYamlPath); err == nil { + var resources []*api.Resource = 
make([]*api.Resource, 0) - productApi.Objects = resources - productApi.Validate() + productApi := &api.Product{} + api.Compile(productYamlPath, productApi) - // TODO Q2: set other providers via flag - providerToGenerate = provider.NewTerraform(productApi, *version, startTime) + if !productApi.ExistsAtVersionOrLower(*version) { + log.Printf("%s does not have a '%s' version, skipping", productName, *version) + return + } - productsForVersion = append(productsForVersion, productApi) + resourceFiles, err := filepath.Glob(fmt.Sprintf("%s/*", productName)) + if err != nil { + log.Fatalf("Cannot get resources files: %v", err) + } + for _, resourceYamlPath := range resourceFiles { + if filepath.Base(resourceYamlPath) == "product.yaml" || filepath.Ext(resourceYamlPath) != ".yaml" { + continue + } - if !slices.Contains(productsToGenerate, productName) { - log.Printf("%s not specified, skipping generation", productName) + // Prepend "go_" to the Go yaml files' name to distinguish with the ruby yaml files + if filepath.Base(resourceYamlPath) == "go_product.yaml" || !strings.HasPrefix(filepath.Base(resourceYamlPath), "go_") { continue } - log.Printf("%s: Generating files", productName) - providerToGenerate.Generate(*outputPath, productName, generateCode, generateDocs) + resource := &api.Resource{} + api.Compile(resourceYamlPath, resource) + + resource.TargetVersionName = *version + resource.Properties = resource.AddLabelsRelatedFields(resource.PropertiesWithExcluded(), nil) + resource.SetDefault(productApi) + resource.Validate() + resources = append(resources, resource) } - } - slices.SortFunc(productsForVersion, func(p1, p2 *api.Product) int { - return strings.Compare(strings.ToLower(p1.Name), strings.ToLower(p2.Name)) - }) + // TODO Q2: override resources - providerToGenerate.CopyCommonFiles(*outputPath, generateCode, generateDocs) + // Sort resources by name + sort.Slice(resources, func(i, j int) bool { + return resources[i].Name < resources[j].Name + }) - 
log.Printf("Compiling common files for terraform") - if generateCode { - providerToGenerate.CompileCommonFiles(*outputPath, productsForVersion, "") + productApi.Objects = resources + productApi.Validate() - // TODO Q2: product overrides + // TODO Q2: set other providers via flag + providerToGenerate = provider.NewTerraform(productApi, *version, startTime) + + *productsForVersion = append(*productsForVersion, productApi) + + if !slices.Contains(productsToGenerate, productName) { + log.Printf("%s not specified, skipping generation", productName) + return + } + + log.Printf("%s: Generating files", productName) + providerToGenerate.Generate(*outputPath, productName, generateCode, generateDocs) } } diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index e1dc95237850..5ffc49dee51b 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -104,7 +104,7 @@ func (t *Terraform) GenerateObject(object api.Resource, outputFolder, productPat t.GenerateResource(object, *templateData, outputFolder, generateCode, generateDocs) if generateCode { - log.Printf("Generating %s tests", object.Name) + // log.Printf("Generating %s tests", object.Name) t.GenerateResourceTests(object, *templateData, outputFolder) t.GenerateResourceSweeper(object, *templateData, outputFolder) } From 9754bacfc8f5b7cb94e440d05982ee9c357a6a9b Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 3 Sep 2024 15:39:17 -0500 Subject: [PATCH 20/60] go rewrite - general refresh and diffs 9/3 (#11626) --- mmv1/api/resource.go | 16 +- mmv1/api/type.go | 2 +- mmv1/description-copy.go | 3 + .../ServicePerimeterDryRunEgressPolicy.yaml | 164 - .../accesscontextmanager/go_AccessPolicy.yaml | 6 +- .../go_ServicePerimeter.yaml | 5 +- ...go_ServicePerimeterDryRunEgressPolicy.yaml | 189 + ...o_ServicePerimeterDryRunIngressPolicy.yaml | 198 + .../go_ServicePerimeterDryRunResource.yaml | 6 +- .../go_ServicePerimeterEgressPolicy.yaml | 2 + .../go_ServicePerimeterIngressPolicy.yaml | 7 +- 
mmv1/products/activedirectory/go_Domain.yaml | 30 +- .../activedirectory/go_DomainTrust.yaml | 1 + mmv1/products/activedirectory/go_Peering.yaml | 2 +- mmv1/products/alloydb/go_Backup.yaml | 1 - mmv1/products/alloydb/go_Cluster.yaml | 45 +- mmv1/products/alloydb/go_Instance.yaml | 14 +- mmv1/products/apigateway/go_Api.yaml | 1 - mmv1/products/apigateway/go_ApiConfig.yaml | 1 - mmv1/products/apigateway/go_Gateway.yaml | 1 - .../appengine/go_FlexibleAppVersion.yaml | 4 +- .../appengine/go_ServiceNetworkSettings.yaml | 2 +- .../appengine/go_ServiceSplitTraffic.yaml | 2 +- .../appengine/go_StandardAppVersion.yaml | 3 +- mmv1/products/apphub/Application.yaml | 2 +- mmv1/products/apphub/Service.yaml | 2 +- mmv1/products/apphub/Workload.yaml | 2 +- mmv1/products/apphub/go_Application.yaml | 2 +- mmv1/products/apphub/go_Service.yaml | 2 +- mmv1/products/apphub/go_Workload.yaml | 2 +- .../products/artifactregistry/Repository.yaml | 26 +- .../artifactregistry/go_Repository.yaml | 28 +- mmv1/products/backupdr/BackupVault.yaml | 129 +- mmv1/products/backupdr/go_BackupVault.yaml | 178 + mmv1/products/backupdr/go_product.yaml | 2 +- .../products/beyondcorp/go_AppConnection.yaml | 1 - mmv1/products/beyondcorp/go_AppConnector.yaml | 1 - mmv1/products/beyondcorp/go_AppGateway.yaml | 1 - mmv1/products/bigquery/go_DatasetAccess.yaml | 2 + mmv1/products/bigquery/go_Table.yaml | 8 +- .../bigqueryanalyticshub/DataExchange.yaml | 4 +- .../bigqueryanalyticshub/Listing.yaml | 6 +- .../bigqueryanalyticshub/go_DataExchange.yaml | 41 +- .../bigqueryanalyticshub/go_Listing.yaml | 32 +- .../bigqueryanalyticshub/go_product.yaml | 2 +- .../bigquerydatatransfer/go_Config.yaml | 23 + .../bigqueryreservation/go_Reservation.yaml | 5 - mmv1/products/billing/go_ProjectInfo.yaml | 2 +- .../binaryauthorization/go_Policy.yaml | 1 + .../go_BlockchainNodes.yaml | 1 - .../certificatemanager/go_Certificate.yaml | 1 - .../go_CertificateIssuanceConfig.yaml | 1 - .../certificatemanager/go_CertificateMap.yaml | 1 
- .../go_CertificateMapEntry.yaml | 1 - .../go_DnsAuthorization.yaml | 1 - .../certificatemanager/go_TrustConfig.yaml | 1 - mmv1/products/clouddeploy/go_Automation.yaml | 1 - .../clouddeploy/go_CustomTargetType.yaml | 1 - .../clouddomains/go_Registration.yaml | 1 - .../cloudfunctions/go_CloudFunction.yaml | 1 - .../products/cloudfunctions2/go_Function.yaml | 2 +- .../cloudquotas/go_QuotaPreference.yaml | 1 + mmv1/products/cloudrunv2/go_Job.yaml | 42 +- mmv1/products/cloudrunv2/go_Service.yaml | 51 +- mmv1/products/cloudtasks/go_Queue.yaml | 173 + .../composer/go_UserWorkloadsConfigMap.yaml | 1 + mmv1/products/compute/go_Address.yaml | 2 +- mmv1/products/compute/go_Autoscaler.yaml | 2 +- mmv1/products/compute/go_BackendBucket.yaml | 2 +- .../compute/go_BackendBucketSignedUrlKey.yaml | 2 +- mmv1/products/compute/go_BackendService.yaml | 22 +- .../go_BackendServiceSignedUrlKey.yaml | 2 +- mmv1/products/compute/go_Disk.yaml | 2 +- .../go_DiskResourcePolicyAttachment.yaml | 2 +- .../compute/go_ExternalVpnGateway.yaml | 2 +- mmv1/products/compute/go_Firewall.yaml | 6 +- mmv1/products/compute/go_ForwardingRule.yaml | 11 +- mmv1/products/compute/go_GlobalAddress.yaml | 3 +- .../compute/go_GlobalForwardingRule.yaml | 3 +- .../compute/go_GlobalNetworkEndpoint.yaml | 2 +- .../go_GlobalNetworkEndpointGroup.yaml | 2 +- mmv1/products/compute/go_HaVpnGateway.yaml | 2 +- mmv1/products/compute/go_HealthCheck.yaml | 15 +- mmv1/products/compute/go_HttpHealthCheck.yaml | 2 +- .../products/compute/go_HttpsHealthCheck.yaml | 2 +- mmv1/products/compute/go_Image.yaml | 2 +- mmv1/products/compute/go_Instance.yaml | 8 +- mmv1/products/compute/go_InstanceGroup.yaml | 2 +- .../compute/go_InstanceGroupManager.yaml | 2 +- .../compute/go_InstanceGroupMembership.yaml | 2 +- .../compute/go_InstanceGroupNamedPort.yaml | 2 +- .../products/compute/go_InstanceSettings.yaml | 2 +- mmv1/products/compute/go_Interconnect.yaml | 14 +- .../compute/go_InterconnectAttachment.yaml | 2 +- 
mmv1/products/compute/go_MachineImage.yaml | 2 +- .../compute/go_ManagedSslCertificate.yaml | 3 +- mmv1/products/compute/go_Network.yaml | 2 +- .../go_NetworkEdgeSecurityService.yaml | 2 +- mmv1/products/compute/go_NetworkEndpoint.yaml | 2 +- .../compute/go_NetworkEndpointGroup.yaml | 2 +- .../products/compute/go_NetworkEndpoints.yaml | 4 +- .../compute/go_NetworkFirewallPolicy.yaml | 2 +- .../go_NetworkPeeringRoutesConfig.yaml | 3 +- mmv1/products/compute/go_NodeGroup.yaml | 2 +- mmv1/products/compute/go_NodeTemplate.yaml | 24 +- mmv1/products/compute/go_PacketMirroring.yaml | 2 +- .../compute/go_PerInstanceConfig.yaml | 2 +- .../compute/go_ProjectCloudArmorTier.yaml | 2 +- .../compute/go_PublicAdvertisedPrefix.yaml | 2 +- .../compute/go_PublicDelegatedPrefix.yaml | 2 +- .../products/compute/go_RegionAutoscaler.yaml | 2 +- .../compute/go_RegionBackendService.yaml | 28 +- .../products/compute/go_RegionCommitment.yaml | 2 +- mmv1/products/compute/go_RegionDisk.yaml | 2 +- ...go_RegionDiskResourcePolicyAttachment.yaml | 2 +- .../compute/go_RegionHealthCheck.yaml | 2 +- .../go_RegionInstanceGroupManager.yaml | 2 +- .../compute/go_RegionNetworkEndpoint.yaml | 2 +- .../go_RegionNetworkEndpointGroup.yaml | 2 +- .../go_RegionNetworkFirewallPolicy.yaml | 2 +- .../compute/go_RegionPerInstanceConfig.yaml | 2 +- .../compute/go_RegionSecurityPolicy.yaml | 2 +- .../compute/go_RegionSecurityPolicyRule.yaml | 2 +- .../compute/go_RegionSslCertificate.yaml | 10 +- mmv1/products/compute/go_RegionSslPolicy.yaml | 2 +- .../compute/go_RegionTargetHttpProxy.yaml | 2 +- .../compute/go_RegionTargetHttpsProxy.yaml | 11 +- .../compute/go_RegionTargetTcpProxy.yaml | 2 +- mmv1/products/compute/go_RegionUrlMap.yaml | 2 +- mmv1/products/compute/go_Reservation.yaml | 2 +- mmv1/products/compute/go_ResizeRequest.yaml | 2 +- mmv1/products/compute/go_ResourcePolicy.yaml | 10 +- mmv1/products/compute/go_Route.yaml | 2 +- mmv1/products/compute/go_Router.yaml | 2 +- 
mmv1/products/compute/go_RouterNat.yaml | 2 +- .../compute/go_RouterRoutePolicy.yaml | 2 +- .../compute/go_SecurityPolicyRule.yaml | 2 +- .../compute/go_ServiceAttachment.yaml | 2 +- mmv1/products/compute/go_SslCertificate.yaml | 10 +- mmv1/products/compute/go_SslPolicy.yaml | 2 +- mmv1/products/compute/go_Subnetwork.yaml | 55 +- mmv1/products/compute/go_TargetGrpcProxy.yaml | 2 +- mmv1/products/compute/go_TargetHttpProxy.yaml | 2 +- .../products/compute/go_TargetHttpsProxy.yaml | 12 +- mmv1/products/compute/go_TargetInstance.yaml | 2 +- mmv1/products/compute/go_TargetSslProxy.yaml | 2 +- mmv1/products/compute/go_TargetTcpProxy.yaml | 2 +- mmv1/products/compute/go_UrlMap.yaml | 2 +- mmv1/products/compute/go_VpnGateway.yaml | 2 +- mmv1/products/compute/go_VpnTunnel.yaml | 2 +- .../containerattached/go_Cluster.yaml | 3 +- .../go_ConnectionProfile.yaml | 69 +- .../go_PrivateConnection.yaml | 3 - mmv1/products/dataform/go_Repository.yaml | 11 +- mmv1/products/datafusion/go_Instance.yaml | 3 +- mmv1/products/dataplex/go_AspectType.yaml | 1 - mmv1/products/dataplex/go_Datascan.yaml | 2 +- mmv1/products/dataplex/go_EntryGroup.yaml | 1 - mmv1/products/dataplex/go_EntryType.yaml | 1 - mmv1/products/dataplex/go_Task.yaml | 2 +- mmv1/products/datastream/Stream.yaml | 4 - .../datastream/go_ConnectionProfile.yaml | 1 - .../datastream/go_PrivateConnection.yaml | 2 +- mmv1/products/datastream/go_Stream.yaml | 42 +- mmv1/products/dialogflowcx/go_Intent.yaml | 1 - .../discoveryengine/go_DataStore.yaml | 100 +- mmv1/products/discoveryengine/go_Schema.yaml | 103 + mmv1/products/dlp/go_DiscoveryConfig.yaml | 69 +- mmv1/products/dns/go_ManagedZone.yaml | 429 ++ mmv1/products/dns/go_Policy.yaml | 159 + mmv1/products/dns/go_ResponsePolicy.yaml | 90 + mmv1/products/dns/go_ResponsePolicyRule.yaml | 131 + mmv1/products/dns/go_product.yaml | 24 + mmv1/products/edgecontainer/go_Cluster.yaml | 2 + mmv1/products/edgenetwork/go_Network.yaml | 2 +- mmv1/products/edgenetwork/go_Subnet.yaml | 2 +- 
mmv1/products/firebase/go_Project.yaml | 6 +- mmv1/products/firebasehosting/go_Channel.yaml | 1 - .../firebasehosting/go_CustomDomain.yaml | 4 + mmv1/products/firebasehosting/go_Version.yaml | 5 + mmv1/products/firestore/go_Database.yaml | 3 - mmv1/products/firestore/go_Document.yaml | 3 + mmv1/products/firestore/go_Field.yaml | 3 + mmv1/products/firestore/go_Index.yaml | 3 +- mmv1/products/gkebackup/go_BackupPlan.yaml | 1 - mmv1/products/gkebackup/go_RestorePlan.yaml | 1 - mmv1/products/gkehub/go_Membership.yaml | 1 - mmv1/products/gkehub2/go_Feature.yaml | 8 +- .../gkehub2/go_MembershipBinding.yaml | 1 - mmv1/products/gkehub2/go_Namespace.yaml | 1 - mmv1/products/gkehub2/go_Scope.yaml | 1 - .../gkehub2/go_ScopeRBACRoleBinding.yaml | 1 - mmv1/products/iap/go_AppEngineService.yaml | 1 + mmv1/products/integrations/go_Client.yaml | 27 +- mmv1/products/kms/go_AutokeyConfig.yaml | 1 + mmv1/products/kms/go_EkmConnection.yaml | 7 + mmv1/products/kms/go_KeyHandle.yaml | 3 +- mmv1/products/logging/go_LogView.yaml | 1 - mmv1/products/managedkafka/go_Cluster.yaml | 10 +- .../go_ConnectivityTest.yaml | 1 - .../networksecurity/go_AddressGroup.yaml | 1 - .../go_AuthorizationPolicy.yaml | 1 - .../networksecurity/go_ClientTlsPolicy.yaml | 23 +- .../networksecurity/go_FirewallEndpoint.yaml | 1 - .../go_FirewallEndpointAssociation.yaml | 7 +- .../networksecurity/go_SecurityProfile.yaml | 1 - .../go_SecurityProfileGroup.yaml | 1 - .../networksecurity/go_ServerTlsPolicy.yaml | 26 - .../networkservices/go_EdgeCacheKeyset.yaml | 1 - .../networkservices/go_EdgeCacheOrigin.yaml | 1 - .../networkservices/go_EdgeCacheService.yaml | 1 - .../networkservices/go_EndpointPolicy.yaml | 1 - mmv1/products/networkservices/go_Gateway.yaml | 1 - .../networkservices/go_GrpcRoute.yaml | 1 - .../networkservices/go_HttpRoute.yaml | 1 - .../networkservices/go_LbRouteExtension.yaml | 1 - .../go_LbTrafficExtension.yaml | 1 - mmv1/products/networkservices/go_Mesh.yaml | 1 - 
.../networkservices/go_ServiceBinding.yaml | 1 - .../networkservices/go_ServiceLbPolicies.yaml | 1 - .../products/networkservices/go_TcpRoute.yaml | 10 +- mmv1/products/notebooks/go_Location.yaml | 1 - mmv1/products/parallelstore/Instance.yaml | 4 +- mmv1/products/parallelstore/go_Instance.yaml | 4 +- mmv1/products/privateca/go_CaPool.yaml | 1 - mmv1/products/privateca/go_Certificate.yaml | 1 - .../privateca/go_CertificateAuthority.yaml | 1 - .../privateca/go_CertificateTemplate.yaml | 1 - mmv1/products/pubsub/go_Schema.yaml | 1 + mmv1/products/pubsub/go_Subscription.yaml | 13 +- mmv1/products/pubsub/go_Topic.yaml | 3 +- mmv1/products/redis/go_Cluster.yaml | 119 + .../go_FolderNotificationConfig.yaml | 130 + .../securitycenterv2/go_FolderMuteConfig.yaml | 118 + .../go_FolderNotificationConfig.yaml | 138 + .../go_FolderSccBigQueryExports.yaml | 152 + .../go_OrganizationMuteConfig.yaml | 113 + .../go_OrganizationNotificationConfig.yaml | 134 + .../go_OrganizationSccBigQueryExports.yaml | 148 + .../go_OrganizationSource.yaml | 88 + .../go_ProjectMuteConfig.yaml | 106 + .../go_ProjectNotificationConfig.yaml | 131 + .../go_ProjectSccBigQueryExports.yaml | 143 + .../products/securitycenterv2/go_product.yaml | 23 + mmv1/products/vertexai/go_Dataset.yaml | 1 - mmv1/products/vertexai/go_Endpoint.yaml | 1 - mmv1/products/vertexai/go_FeatureGroup.yaml | 1 - .../vertexai/go_FeatureGroupFeature.yaml | 1 - .../vertexai/go_FeatureOnlineStore.yaml | 2 +- .../go_FeatureOnlineStoreFeatureview.yaml | 2 +- mmv1/products/vertexai/go_Featurestore.yaml | 1 - .../vertexai/go_FeaturestoreEntitytype.yaml | 1 - .../go_FeaturestoreEntitytypeFeature.yaml | 1 - mmv1/products/vertexai/go_Index.yaml | 1 - mmv1/products/vertexai/go_IndexEndpoint.yaml | 1 - .../go_IndexEndpointDeployedIndex.yaml | 297 ++ mmv1/products/vertexai/go_Tensorboard.yaml | 1 - .../vmwareengine/go_ExternalAccessRule.yaml | 2 +- .../vmwareengine/go_ExternalAddress.yaml | 3 +- .../vmwareengine/go_PrivateCloud.yaml | 1 + 
mmv1/products/vpcaccess/go_Connector.yaml | 12 +- mmv1/products/workflows/go_Workflow.yaml | 141 + mmv1/products/workflows/go_product.yaml | 36 + .../products/workstations/go_Workstation.yaml | 162 + .../workstations/go_WorkstationCluster.yaml | 237 + .../workstations/go_WorkstationConfig.yaml | 680 +++ mmv1/products/workstations/go_product.yaml | 22 + mmv1/provider/template_data.go | 15 +- mmv1/provider/terraform.go | 8 +- mmv1/provider/terraform.rb | 3 + .../go/bigquery_dataset_access.go.tmpl | 16 +- .../go/compute_resource_policy.go.tmpl | 5 + .../constants/go/notebooks_instance.go.tmpl | 3 +- .../terraform/constants/go/subnetwork.tmpl | 31 + .../go/vmwareengine_private_cloud.go.tmpl | 46 + .../constants/go/workbench_instance.go.tmpl | 109 +- ...udquotas_quota_preference_trace_id.go.tmpl | 3 + .../go/name_or_name_prefix.go.tmpl | 7 +- .../go/string_to_lower_case.go.tmpl | 19 + ...ctions2_function_source_generation.go.tmpl | 38 + ...discoveryengine_schema_json_schema.go.tmpl | 23 + .../go/service_directory_service.go.tmpl | 1 + ...x_ai_index_endpoint_deployed_index.go.tmpl | 17 + .../decoders/go/backend_service.go.tmpl | 12 - .../go/bigquery_data_transfer.go.tmpl | 13 + .../go/region_backend_service.go.tmpl | 13 +- ...x_ai_index_endpoint_deployed_index.go.tmpl | 20 + .../encoders/go/backend_service.go.tmpl | 18 - .../go/bigquery_data_transfer.go.tmpl | 20 +- .../compute_region_target_https_proxy.go.tmpl | 10 +- .../go/compute_target_https_proxy.go.tmpl | 10 +- .../go/region_backend_service.go.tmpl | 17 - ...x_ai_index_endpoint_deployed_index.go.tmpl | 7 + .../terraform/encoders/go/workflow.go.tmpl | 7 +- .../examples/apphub_application_full.tf.erb | 2 +- .../examples/apphub_service_full.tf.erb | 2 +- .../examples/apphub_workload_full.tf.erb | 2 +- .../artifact_registry_repository_basic.tf.erb | 2 +- ...rtifact_registry_repository_cleanup.tf.erb | 2 +- ...artifact_registry_repository_docker.tf.erb | 2 +- ...artifact_registry_repository_remote.tf.erb | 2 +- 
...fact_registry_repository_remote_apt.tf.erb | 2 +- ...tory_remote_docker_custom_with_auth.tf.erb | 2 +- ...ry_repository_remote_dockerhub_auth.tf.erb | 2 +- ...itory_remote_maven_custom_with_auth.tf.erb | 2 +- ...ository_remote_npm_custom_with_auth.tf.erb | 2 +- ...tory_remote_python_custom_with_auth.tf.erb | 2 +- ...fact_registry_repository_remote_yum.tf.erb | 2 +- ...rtifact_registry_repository_virtual.tf.erb | 6 +- ...ry_analyticshub_data_exchange_basic.tf.erb | 2 +- ...uery_analyticshub_data_exchange_dcr.tf.erb | 2 +- ...bigquery_analyticshub_listing_basic.tf.erb | 6 +- .../bigquery_analyticshub_listing_dcr.tf.erb | 6 +- ...ery_analyticshub_listing_restricted.tf.erb | 6 +- ...ce_perimeter_dry_run_egress_policy.tf.tmpl | 36 + ...e_perimeter_dry_run_ingress_policy.tf.tmpl | 39 + .../go/active_directory_domain_basic.tf.tmpl | 2 +- ...ctive_directory_domain_trust_basic.tf.tmpl | 1 + .../go/active_directory_peering_basic.tf.tmpl | 2 + .../go/alloydb_cluster_restore.tf.tmpl | 1 - .../go/alloydb_instance_basic_test.tf.tmpl | 1 - .../go/alloydb_instance_psc_test.tf.tmpl | 21 + .../alloydb_secondary_instance_basic.tf.tmpl | 2 +- .../examples/go/alloydb_user_builtin.tf.tmpl | 3 +- .../go/alloydb_user_builtin_test.tf.tmpl | 1 - .../examples/go/alloydb_user_iam_test.tf.tmpl | 1 - ...environment_keyvaluemaps_beta_test.tf.tmpl | 1 + ...ent_keyvaluemaps_entries_beta_test.tf.tmpl | 1 + ...ironment_keyvaluemaps_entries_test.tf.tmpl | 1 + ...igee_environment_keyvaluemaps_test.tf.tmpl | 1 + .../go/apphub_application_full.tf.tmpl | 2 +- .../examples/go/apphub_service_full.tf.tmpl | 2 +- .../examples/go/apphub_workload_full.tf.tmpl | 2 +- ...artifact_registry_repository_basic.tf.tmpl | 2 +- ...tifact_registry_repository_cleanup.tf.tmpl | 2 +- ...rtifact_registry_repository_docker.tf.tmpl | 2 +- ...rtifact_registry_repository_remote.tf.tmpl | 2 +- ...act_registry_repository_remote_apt.tf.tmpl | 2 +- ...ory_remote_docker_custom_with_auth.tf.tmpl | 2 +- 
...y_repository_remote_dockerhub_auth.tf.tmpl | 2 +- ...tory_remote_maven_custom_with_auth.tf.tmpl | 2 +- ...sitory_remote_npm_custom_with_auth.tf.tmpl | 2 +- ...ory_remote_python_custom_with_auth.tf.tmpl | 2 +- ...act_registry_repository_remote_yum.tf.tmpl | 2 +- ...tifact_registry_repository_virtual.tf.tmpl | 6 +- .../go/backend_service_external_iap.tf.tmpl | 1 + ...service_traffic_director_ring_hash.tf.tmpl | 10 +- .../go/backup_dr_backup_vault_full.tf.tmpl | 18 + ...y_analyticshub_data_exchange_basic.tf.tmpl | 2 +- ...ery_analyticshub_data_exchange_dcr.tf.tmpl | 9 + ...igquery_analyticshub_listing_basic.tf.tmpl | 6 +- .../bigquery_analyticshub_listing_dcr.tf.tmpl | 60 + ...ry_analyticshub_listing_restricted.tf.tmpl | 6 +- .../go/bigquery_connection_kms.tf.tmpl | 10 + .../bigquerydatatransfer_config_cmek.tf.tmpl | 46 + ...uerydatatransfer_config_salesforce.tf.tmpl | 21 + ...loud_tasks_queue_http_target_oauth.tf.tmpl | 41 + ...cloud_tasks_queue_http_target_oidc.tf.tmpl | 41 + .../examples/go/cloudfunctions2_full.tf.tmpl | 3 +- .../examples/go/cloudrunv2_job_sql.tf.tmpl | 1 - ...e_health_check_http_source_regions.tf.tmpl | 1 - ..._health_check_https_source_regions.tf.tmpl | 1 - ...te_health_check_tcp_source_regions.tf.tmpl | 1 - ...onnection_profile_existing_alloydb.tf.tmpl | 55 + ..._connection_profile_existing_mysql.tf.tmpl | 25 + ...nnection_profile_existing_postgres.tf.tmpl | 25 + .../examples/go/dataform_repository.tf.tmpl | 30 + ...m_repository_with_cloudsource_repo.tf.tmpl | 34 + .../go/datastream_stream_bigquery.tf.tmpl | 1 - .../discoveryengine_datastore_basic.tf.tmpl | 15 +- ..._document_processing_config_layout.tf.tmpl | 20 + .../go/discoveryengine_schema_basic.tf.tmpl | 17 + .../go/dlp_discovery_config_actions.tf.tmpl | 42 + ...iscovery_config_conditions_cadence.tf.tmpl | 3 + ...basehosting_customdomain_cloud_run.tf.tmpl | 2 + .../firebasehosting_version_cloud_run.tf.tmpl | 2 + .../go/firestore_cmek_database.tf.tmpl | 9 - 
...re_cmek_database_in_datastore_mode.tf.tmpl | 9 - .../go/integrations_client_full.tf.tmpl | 2 +- .../examples/go/kms_key_handle_basic.tf.tmpl | 1 + ...ecurity_client_tls_policy_advanced.tf.tmpl | 6 - ...k_security_client_tls_policy_basic.tf.tmpl | 1 - ...irewall_endpoint_association_basic.tf.tmpl | 21 +- ...ecurity_server_tls_policy_advanced.tf.tmpl | 1 - ...k_security_server_tls_policy_basic.tf.tmpl | 11 - ...rk_security_server_tls_policy_mtls.tf.tmpl | 3 - ...rity_server_tls_policy_server_cert.tf.tmpl | 1 - ...network_services_tcp_route_actions.tf.tmpl | 1 + .../go/node_template_accelerators.tf.tmpl | 15 + .../go/parallelstore_instance_basic.tf.tmpl | 2 - ...sub_subscription_push_cloudstorage.tf.tmpl | 1 + ...ubscription_push_cloudstorage_avro.tf.tmpl | 2 + .../examples/go/redis_cluster_ha.tf.tmpl | 11 + .../go/redis_cluster_ha_single_zone.tf.tmpl | 11 + ...ion_backend_service_balancing_mode.tf.tmpl | 12 +- ...egion_backend_service_external_iap.tf.tmpl | 1 + .../go/resource_policy_hourly_format.tf.tmpl | 12 + ...c_folder_notification_config_basic.tf.tmpl | 20 + ...lder_big_query_export_config_basic.tf.tmpl | 32 + .../scc_v2_folder_mute_config_basic.tf.tmpl | 13 + ...2_folder_notification_config_basic.tf.tmpl | 20 + ...tion_big_query_export_config_basic.tf.tmpl | 26 + .../scc_v2_organization_source_basic.tf.tmpl | 5 + ...ject_big_query_export_config_basic.tf.tmpl | 26 + .../scc_v2_project_mute_config_basic.tf.tmpl | 8 + ..._project_notification_config_basic.tf.tmpl | 15 + .../go/storage_managed_folder_basic.tf.tmpl | 5 +- ...subnetwork_reserved_internal_range.tf.tmpl | 25 + ...ubnetwork_reserved_secondary_range.tf.tmpl | 42 + .../examples/go/tpu_node_full.tf.tmpl | 1 + .../examples/go/tpu_node_full_test.tf.tmpl | 9 +- ...deployed_index_automatic_resources.tf.tmpl | 67 + ...ndex_endpoint_deployed_index_basic.tf.tmpl | 77 + ..._endpoint_deployed_index_basic_two.tf.tmpl | 80 + ...deployed_index_dedicated_resources.tf.tmpl | 69 + 
...vmware_engine_network_policy_basic.tf.tmpl | 4 +- .../examples/go/vpc_access_connector.tf.tmpl | 2 + .../vpc_access_connector_shared_vpc.tf.tmpl | 2 + .../examples/redis_cluster_ha.tf.erb | 2 +- .../redis_cluster_ha_single_zone.tf.erb | 2 +- .../terraform/examples/tpu_node_full.tf.erb | 1 + .../examples/tpu_node_full_test.tf.erb | 13 +- .../go/ssl_certificate.tmpl | 6 +- .../service_management_consumer.tf.erb | 1 + .../post_delete/go/private_cloud.go.tmpl | 11 +- .../scc_v1_folder_notification_config.go.tmpl | 12 + .../scc_v2_folder_notification_config.go.tmpl | 12 + ...ganization_big_query_export_config.go.tmpl | 12 + .../post_update/go/compute_subnetwork.go.tmpl | 72 + ...s_context_manager_dry_run_resource.go.tmpl | 1 + .../go/vmwareengine_private_cloud.go.tmpl | 15 + .../go/cloudrunv2_job_deletion_policy.go.tmpl | 3 + ...cloudrunv2_service_deletion_policy.go.tmpl | 3 + .../pre_delete/go/private_connection.go.tmpl | 5 + ...x_ai_index_endpoint_deployed_index.go.tmpl | 3 + .../go/bigtable_app_profile.go.tmpl | 10 + .../pre_update/go/spanner_database.go.tmpl | 3 +- .../property_documentation.html.markdown.tmpl | 2 +- .../go/spanner_database.go.tmpl | 3 +- ...x_ai_index_endpoint_deployed_index.go.tmpl | 3 + mmv1/templates/terraform/yaml_conversion.erb | 5 +- .../terraform/acctest/go/test_utils.go.tmpl | 5 +- mmv1/third_party/terraform/go/go.mod | 2 +- mmv1/third_party/terraform/go/main.go.tmpl | 10 - ...context_manager_access_policy_test.go.tmpl | 2 + ...p_engine_flexible_app_version_test.go.tmpl | 445 ++ ...source_backup_dr_backup_vault_test.go.tmpl | 97 + .../go/resource_bigquery_dataset_test.go.tmpl | 867 ---- .../go/resource_bigquery_table.go.tmpl | 2961 ------------ .../go/resource_bigquery_table_test.go.tmpl | 4261 ----------------- ...source_binary_authorization_policy_test.go | 8 +- ...ource_cloudfunctions_function_test.go.tmpl | 4 + ...ntity_group_transitive_memberships.go.tmpl | 179 + ...resource_cloud_identity_group_test.go.tmpl | 7 +- 
.../resource_cloud_run_service_test.go.tmpl | 4 + .../go/resource_cloud_run_v2_job_test.go.tmpl | 42 +- ...resource_cloud_run_v2_service_test.go.tmpl | 60 +- .../go/resource_cloud_tasks_queue_test.go | 155 + .../go/resource_composer_environment.go.tmpl | 1 - ...resource_composer_environment_test.go.tmpl | 5 + .../go/compute_instance_helpers.go.tmpl | 23 +- .../go/data_source_google_compute_instance.go | 2 +- ...ource_compute_backend_service_test.go.tmpl | 7 +- .../go/resource_compute_disk_test.go.tmpl | 4 +- ...ource_compute_firewall_policy_rule_test.go | 148 +- .../resource_compute_firewall_policy_test.go | 5 +- .../resource_compute_global_address_test.go | 84 + ...resource_compute_health_check_test.go.tmpl | 6 +- .../go/resource_compute_instance.go.tmpl | 84 +- ...ompute_instance_from_machine_image.go.tmpl | 11 - ...e_instance_from_machine_image_test.go.tmpl | 361 +- ...rce_compute_instance_from_template.go.tmpl | 11 - ...ompute_instance_from_template_test.go.tmpl | 483 +- ...resource_compute_instance_settings_test.go | 2 +- ...resource_compute_instance_template.go.tmpl | 26 +- ...rce_compute_instance_template_test.go.tmpl | 111 +- .../go/resource_compute_instance_test.go.tmpl | 634 ++- ...mpute_network_firewall_policy_rule_test.go | 2 + ...ompute_region_backend_service_test.go.tmpl | 18 +- ...e_compute_region_instance_template.go.tmpl | 29 +- ...pute_region_instance_template_test.go.tmpl | 47 +- ...ute_region_target_https_proxy_test.go.tmpl | 992 ++++ .../resource_compute_subnetwork_test.go.tmpl | 126 +- ...ce_compute_target_https_proxy_test.go.tmpl | 240 +- .../services/container/go/node_config.go.tmpl | 88 +- .../go/resource_container_cluster.go.tmpl | 219 +- ...source_container_cluster_migratev1.go.tmpl | 7 + .../resource_container_cluster_test.go.tmpl | 778 ++- .../resource_container_node_pool_test.go.tmpl | 128 +- ...ce_dataflow_flex_template_job_test.go.tmpl | 2 +- .../resource_dataform_repository_test.go.tmpl | 2 +- 
.../go/resource_dataproc_cluster_test.go | 2 + .../go/resource_dialogflow_agent_test.go | 2 +- .../go/resource_dns_record_set_test.go.tmpl | 130 + ...e_firebase_android_app_config_test.go.tmpl | 2 +- ...ce_gke_hub_feature_membership_test.go.tmpl | 2 + .../go/resource_gke_hub_feature_test.go.tmpl | 83 + .../iam2/go/resource_iam_deny_policy_test.go | 2 +- .../go/resource_kms_crypto_key_test.go.tmpl | 1309 +++++ ...rk_security_client_tls_policy_test.go.tmpl | 1 + ...ce_network_services_tcp_route_test.go.tmpl | 2 + ...source_parallelstore_instance_test.go.tmpl | 23 +- .../go/resource_redis_cluster_test.go.tmpl | 55 +- ...esource_google_project_iam_binding_test.go | 5 +- ...resource_google_project_iam_member_test.go | 5 +- ...resource_google_project_iam_policy_test.go | 12 +- .../resource_google_project_service.go.tmpl | 40 +- ...source_google_project_service_test.go.tmpl | 42 +- .../go/resource_sql_database_instance.go.tmpl | 2553 ++++++++++ .../storage/go/resource_storage_bucket.go | 54 +- .../go/resource_storage_bucket_test.go | 35 +- 511 files changed, 16863 insertions(+), 9902 deletions(-) create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml create mode 100644 mmv1/products/backupdr/go_BackupVault.yaml create mode 100644 mmv1/products/discoveryengine/go_Schema.yaml create mode 100644 mmv1/products/dns/go_ManagedZone.yaml create mode 100644 mmv1/products/dns/go_Policy.yaml create mode 100644 mmv1/products/dns/go_ResponsePolicy.yaml create mode 100644 mmv1/products/dns/go_ResponsePolicyRule.yaml create mode 100644 mmv1/products/dns/go_product.yaml create mode 100644 mmv1/products/securitycenter/go_FolderNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml create mode 100644 
mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml create mode 100644 mmv1/products/securitycenterv2/go_OrganizationSource.yaml create mode 100644 mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml create mode 100644 mmv1/products/securitycenterv2/go_product.yaml create mode 100644 mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml create mode 100644 mmv1/products/workflows/go_Workflow.yaml create mode 100644 mmv1/products/workflows/go_product.yaml create mode 100644 mmv1/products/workstations/go_Workstation.yaml create mode 100644 mmv1/products/workstations/go_WorkstationCluster.yaml create mode 100644 mmv1/products/workstations/go_WorkstationConfig.yaml create mode 100644 mmv1/products/workstations/go_product.yaml create mode 100644 mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl create mode 100644 mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl create mode 100644 mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl create mode 100644 mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl 
create mode 100644 mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquerydatatransfer_config_cmek.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquerydatatransfer_config_salesforce.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/cloud_tasks_queue_http_target_oauth.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/cloud_tasks_queue_http_target_oidc.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/database_migration_service_connection_profile_existing_alloydb.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/database_migration_service_connection_profile_existing_mysql.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/database_migration_service_connection_profile_existing_postgres.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/discoveryengine_datastore_document_processing_config_layout.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/discoveryengine_schema_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/node_template_accelerators.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/resource_policy_hourly_format.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_folder_notification_config_basic.tf.tmpl create mode 100644 
mmv1/templates/terraform/examples/go/scc_v2_folder_big_query_export_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_folder_mute_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_folder_notification_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_organization_big_query_export_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_organization_source_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_project_big_query_export_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_project_mute_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_project_notification_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/subnetwork_reserved_internal_range.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/subnetwork_reserved_secondary_range.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_automatic_resources.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_basic_two.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_index_endpoint_deployed_index_dedicated_resources.tf.tmpl create mode 100644 mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl create mode 100644 mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl create mode 100644 mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl create mode 100644 mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl create mode 100644 mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl create mode 
100644 mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl create mode 100644 mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl delete mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_transitive_memberships.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 0e589f1b64cc..f45908b8da70 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -248,6 +248,9 @@ type Resource struct { StateUpgraders bool `yaml:"state_upgraders"` + // Do not apply the default attribution label + SkipAttributionLabel bool `yaml:"skip_attribution_label"` + // This block inserts the named function and its attribute into the 
// resource schema -- the code for the migrate_state function must // be included in the resource constants or come from tpgresource @@ -545,7 +548,11 @@ func (r *Resource) AddLabelsRelatedFields(props []*Type, parent *Type) []*Type { // def add_labels_fields(props, parent, labels) func (r *Resource) addLabelsFields(props []*Type, parent *Type, labels *Type) []*Type { if parent == nil || parent.FlattenObject { - r.CustomDiff = append(r.CustomDiff, "tpgresource.SetLabelsDiff") + if r.SkipAttributionLabel { + r.CustomDiff = append(r.CustomDiff, "tpgresource.SetLabelsDiffWithoutAttributionLabel") + } else { + r.CustomDiff = append(r.CustomDiff, "tpgresource.SetLabelsDiff") + } } else if parent.Name == "metadata" { r.CustomDiff = append(r.CustomDiff, "tpgresource.SetMetadataLabelsDiff") } @@ -832,12 +839,7 @@ func (r Resource) ClientNamePascal() string { } func (r Resource) PackageName() string { - clientName := r.ProductMetadata.ClientName - if clientName == "" { - clientName = r.ProductMetadata.Name - } - - return strings.ToLower(clientName) + return strings.ToLower(r.ProductMetadata.Name) } // In order of preference, use TF override, diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 1f2e4d26e489..b83028e81b5f 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -709,7 +709,7 @@ func (t Type) Deprecated() bool { } func (t *Type) GetDescription() string { - return strings.TrimRight(t.Description, "\n") + return strings.TrimSpace(strings.TrimRight(t.Description, "\n")) } // // private diff --git a/mmv1/description-copy.go b/mmv1/description-copy.go index 1cd004b31a2d..77b72b6466f7 100644 --- a/mmv1/description-copy.go +++ b/mmv1/description-copy.go @@ -39,6 +39,9 @@ func CopyText(identifier string, last bool) { } for _, productPath := range allProductFiles { + if strings.Contains(productPath, "healthcare") { + continue + } // Gather go and ruby file pairs yamlMap := make(map[string][]string) yamlPaths, err := filepath.Glob(fmt.Sprintf("%s/*", productPath)) diff 
--git a/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml b/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml index aadd137e0aff..064ad347710a 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeterDryRunEgressPolicy.yaml @@ -161,167 +161,3 @@ properties: description: | Value for permission should be a valid Cloud IAM permission for the corresponding `serviceName` in `ApiOperation`. -# Copyright 2018 Google Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. ---- !ruby/object:Api::Resource -name: 'ServicePerimeterDryRunEgressPolicy' -create_url: '{{perimeter}}' -base_url: '' -self_link: '{{perimeter}}' -create_verb: :PATCH -delete_verb: :PATCH -update_mask: true -immutable: true -identity: - - egressFrom - - egressTo -nested_query: !ruby/object:Api::Resource::NestedQuery - modify_by_patch: true - is_list_of_ids: false - keys: - - spec - - egressPolicies -references: !ruby/object:Api::Resource::ReferenceLinks - api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#egresspolicy' -description: | - Manage a single EgressPolicy in the spec (dry-run) configuration for a service perimeter. - EgressPolicies match requests based on egressFrom and egressTo stanzas. - For an EgressPolicy to match, both egressFrom and egressTo stanzas must be matched. 
- If an EgressPolicy matches a request, the request is allowed to span the ServicePerimeter - boundary. For example, an EgressPolicy can be used to allow VMs on networks - within the ServicePerimeter to access a defined set of projects outside the - perimeter in certain contexts (e.g. to read data from a Cloud Storage bucket - or query against a BigQuery dataset). - - ~> **Note:** By default, updates to this resource will remove the EgressPolicy from the - from the perimeter and add it back in a non-atomic manner. To ensure that the new EgressPolicy - is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource. -examples: - - !ruby/object:Provider::Terraform::Examples - name: 'access_context_manager_service_perimeter_dry_run_egress_policy' - skip_test: true -autogen_async: true -exclude_tgc: true -# Skipping the sweeper due to the non-standard base_url and because this is fine-grained under ServicePerimeter -skip_sweeper: true -exclude_import: true -id_format: '{{perimeter}}' -import_format: ['{{perimeter}}'] -mutex: '{{perimeter}}' -custom_code: !ruby/object:Provider::Terraform::CustomCode - custom_import: templates/terraform/custom_import/access_context_manager_service_perimeter_ingress_policy.go.erb - pre_update: templates/terraform/pre_create/access_context_manager_dry_run_resource.go.erb - pre_create: templates/terraform/pre_create/access_context_manager_dry_run_resource.go.erb - pre_delete: templates/terraform/pre_create/access_context_manager_dry_run_resource.go.erb -parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'perimeter' - resource: 'ServicePerimeter' - imports: 'name' - description: | - The name of the Service Perimeter to add this resource to. - required: true - url_param_only: true -properties: - - !ruby/object:Api::Type::NestedObject - name: 'egressFrom' - description: | - Defines conditions on the source of a request causing this `EgressPolicy` to apply. 
- properties: - - !ruby/object:Api::Type::Enum - name: 'identityType' - description: | - Specifies the type of identities that are allowed access to outside the - perimeter. If left unspecified, then members of `identities` field will - be allowed access. - values: - - :ANY_IDENTITY - - :ANY_USER_ACCOUNT - - :ANY_SERVICE_ACCOUNT - - !ruby/object:Api::Type::Array - name: 'identities' - description: | - A list of identities that are allowed access through this `EgressPolicy`. - Should be in the format of email address. The email address should - represent individual user or service account only. - item_type: Api::Type::String - - !ruby/object:Api::Type::Array - name: 'sources' - description: 'Sources that this EgressPolicy authorizes access from.' - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'accessLevel' - description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' - - !ruby/object:Api::Type::Enum - name: 'sourceRestriction' - description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' - values: - - :SOURCE_RESTRICTION_UNSPECIFIED - - :SOURCE_RESTRICTION_ENABLED - - :SOURCE_RESTRICTION_DISABLED - - !ruby/object:Api::Type::NestedObject - name: 'egressTo' - description: | - Defines the conditions on the `ApiOperation` and destination resources that - cause this `EgressPolicy` to apply. - properties: - - !ruby/object:Api::Type::Array - name: 'resources' - item_type: Api::Type::String - description: | - A list of resources, currently only projects in the form - `projects/`, that match this to stanza. A request matches - if it contains a resource in this list. If * is specified for resources, - then this `EgressTo` rule will authorize access to all resources outside - the perimeter. 
- - !ruby/object:Api::Type::Array - name: 'externalResources' - item_type: Api::Type::String - description: | - A list of external resources that are allowed to be accessed. A request - matches if it contains an external resource in this list (Example: - s3://bucket/path). Currently '*' is not allowed. - - !ruby/object:Api::Type::Array - name: 'operations' - description: | - A list of `ApiOperations` that this egress rule applies to. A request matches - if it contains an operation/service in this list. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'serviceName' - description: | - The name of the API whose methods or permissions the `IngressPolicy` or - `EgressPolicy` want to allow. A single `ApiOperation` with serviceName - field set to `*` will allow all methods AND permissions for all services. - - !ruby/object:Api::Type::Array - name: 'methodSelectors' - description: | - API methods or permissions to allow. Method or permission must belong - to the service specified by `serviceName` field. A single MethodSelector - entry with `*` specified for the `method` field will allow all methods - AND permissions for the service specified in `serviceName`. - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: 'method' - description: | - Value for `method` should be a valid method name for the corresponding - `serviceName` in `ApiOperation`. If `*` used as value for method, - then ALL methods and permissions are allowed. - - !ruby/object:Api::Type::String - name: 'permission' - description: | - Value for permission should be a valid Cloud IAM permission for the - corresponding `serviceName` in `ApiOperation`. 
diff --git a/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml b/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml index 80787619400d..039589f75bb7 100644 --- a/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml +++ b/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml @@ -84,7 +84,7 @@ parameters: type: String description: | The parent of this AccessPolicy in the Cloud Resource Hierarchy. - Format: organizations/{organization_id} + Format: 'organizations/{{organization_id}}' required: true immutable: true - name: 'title' @@ -96,7 +96,7 @@ parameters: type: Array description: | Folder or project on which this policy is applicable. - Format: folders/{{folder_id}} or projects/{{project_id}} + Format: 'folders/{{folder_id}}' or 'projects/{{project_number}}' item_type: type: String max_size: 1 @@ -104,7 +104,7 @@ properties: - name: 'name' type: String description: | - Resource name of the AccessPolicy. Format: {policy_id} + Resource name of the AccessPolicy. Format: '{{policy_id}}' output: true custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' - name: 'createTime' diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml index e85986dff856..d764a22a21b0 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml @@ -279,7 +279,10 @@ properties: description: | A Google Cloud resource that is allowed to ingress the perimeter. Requests from these resources will be allowed to access perimeter data. - Currently only projects are allowed. Format `projects/{project_number}` + Currently only projects and VPCs are allowed. + Project format: `projects/{projectNumber}` + VPC network format: + `//compute.googleapis.com/projects/{PROJECT_ID}/global/networks/{NAME}`. The project may be in any Google Cloud organization, not just the organization that the perimeter is defined in. 
`*` is not allowed, the case of allowing all Google Cloud resources only is not supported. diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml new file mode 100644 index 000000000000..1803cf24bcea --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunEgressPolicy.yaml @@ -0,0 +1,189 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterDryRunEgressPolicy' +description: | + Manage a single EgressPolicy in the spec (dry-run) configuration for a service perimeter. + EgressPolicies match requests based on egressFrom and egressTo stanzas. + For an EgressPolicy to match, both egressFrom and egressTo stanzas must be matched. + If an EgressPolicy matches a request, the request is allowed to span the ServicePerimeter + boundary. For example, an EgressPolicy can be used to allow VMs on networks + within the ServicePerimeter to access a defined set of projects outside the + perimeter in certain contexts (e.g. to read data from a Cloud Storage bucket + or query against a BigQuery dataset). + + ~> **Note:** By default, updates to this resource will remove the EgressPolicy from the + from the perimeter and add it back in a non-atomic manner. 
To ensure that the new EgressPolicy + is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource. +references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#egresspolicy' +docs: +id_format: '{{perimeter}}' +base_url: '' +self_link: '{{perimeter}}' +create_url: '{{perimeter}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter}}' +import_format: + - '{{perimeter}}' +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - egressFrom + - egressTo +nested_query: + keys: + - spec + - egressPolicies + is_list_of_ids: false + modify_by_patch: true +custom_code: + pre_create: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_dry_run_egress_policy' + skip_test: true +parameters: + - name: 'perimeter' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. 
+ url_param_only: true + required: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'egressFrom' + type: NestedObject + description: | + Defines conditions on the source of a request causing this `EgressPolicy` to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access to outside the + perimeter. If left unspecified, then members of `identities` field will + be allowed access. + enum_values: + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. + item_type: + type: String + - name: 'sources' + type: Array + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - name: 'sourceRestriction' + type: Enum + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + enum_values: + - 'SOURCE_RESTRICTION_ENABLED' + - 'SOURCE_RESTRICTION_DISABLED' + - name: 'egressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and destination resources that + cause this `EgressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, that match this to stanza. A request matches + if it contains a resource in this list. 
If * is specified for resources, + then this `EgressTo` rule will authorize access to all resources outside + the perimeter. + item_type: + type: String + - name: 'externalResources' + type: Array + description: | + A list of external resources that are allowed to be accessed. A request + matches if it contains an external resource in this list (Example: + s3://bucket/path). Currently '*' is not allowed. + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` that this egress rule applies to. A request matches + if it contains an operation/service in this list. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with serviceName + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong + to the service specified by `serviceName` field. A single MethodSelector + entry with `*` specified for the `method` field will allow all methods + AND permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for `method` should be a valid method name for the corresponding + `serviceName` in `ApiOperation`. If `*` used as value for method, + then ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. 
diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml new file mode 100644 index 000000000000..dbbdc68165e6 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunIngressPolicy.yaml @@ -0,0 +1,198 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterDryRunIngressPolicy' +description: | + Manage a single IngressPolicy in the spec (dry-run) configuration for a service perimeter. + IngressPolicies match requests based on ingressFrom and ingressTo stanzas. For an ingress policy to match, + both the ingressFrom and ingressTo stanzas must be matched. If an IngressPolicy matches a request, + the request is allowed through the perimeter boundary from outside the perimeter. + For example, access from the internet can be allowed either based on an AccessLevel or, + for traffic hosted on Google Cloud, the project of the source network. + For access from private networks, using the project of the hosting network is required. + Individual ingress policies can be limited by restricting which services and/ + or actions they match using the ingressTo field. + + ~> **Note:** By default, updates to this resource will remove the IngressPolicy + from the perimeter and add it back in a non-atomic manner. 
To ensure that the new IngressPolicy + is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource. +references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#ingresspolicy' +docs: +id_format: '{{perimeter}}' +base_url: '' +self_link: '{{perimeter}}' +create_url: '{{perimeter}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter}}' +import_format: + - '{{perimeter}}' +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - ingressFrom + - ingressTo +nested_query: + keys: + - spec + - ingressPolicies + is_list_of_ids: false + modify_by_patch: true +custom_code: + pre_create: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_dry_run_ingress_policy' + skip_test: true +parameters: + - name: 'perimeter' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. 
+ url_param_only: true + required: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'ingressFrom' + type: NestedObject + description: | + Defines the conditions on the source of a request causing this `IngressPolicy` + to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access from outside the + perimeter. If left unspecified, then members of `identities` field will be + allowed access. + enum_values: + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. + item_type: + type: String + - name: 'sources' + type: Array + description: | + Sources that this `IngressPolicy` authorizes access from. + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: | + An `AccessLevel` resource name that allow resources within the + `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed + must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent + `AccessLevel` will cause an error. If no `AccessLevel` names are listed, + resources within the perimeter can only be accessed via Google Cloud calls + with request origins within the perimeter. + Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` + If * is specified, then all IngressSources will be allowed. + - name: 'resource' + type: String + description: | + A Google Cloud resource that is allowed to ingress the perimeter. + Requests from these resources will be allowed to access perimeter data. + Currently only projects are allowed. 
Format `projects/{project_number}` + The project may be in any Google Cloud organization, not just the + organization that the perimeter is defined in. `*` is not allowed, the case + of allowing all Google Cloud resources only is not supported. + - name: 'ingressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and request destination that cause + this `IngressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, protected by this `ServicePerimeter` + that are allowed to be accessed by sources defined in the + corresponding `IngressFrom`. A request matches if it contains + a resource in this list. If `*` is specified for resources, + then this `IngressTo` rule will authorize access to all + resources inside the perimeter, provided that the request + also matches the `operations` field. + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` the sources specified in corresponding `IngressFrom` + are allowed to perform in this `ServicePerimeter`. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong to + the service specified by serviceName field. A single `MethodSelector` entry + with `*` specified for the method field will allow all methods AND + permissions for the service specified in `serviceName`. 
+ item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for method should be a valid method name for the corresponding + serviceName in `ApiOperation`. If `*` used as value for `method`, then + ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml index 33401434214b..ec19b9600bba 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml @@ -73,9 +73,9 @@ nested_query: is_list_of_ids: true modify_by_patch: true custom_code: - pre_create: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' - pre_update: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' - pre_delete: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl' custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl' exclude_tgc: true skip_sweeper: true diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml index 849dea461231..d6f02371e73b 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml +++ 
b/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml @@ -15,6 +15,7 @@ --- name: 'ServicePerimeterEgressPolicy' description: | + Manage a single EgressPolicy in the status (enforced) configuration for a service perimeter. EgressPolicies match requests based on egressFrom and egressTo stanzas. For an EgressPolicy to match, both egressFrom and egressTo stanzas must be matched. If an EgressPolicy matches a request, the request is allowed to span the ServicePerimeter @@ -41,6 +42,7 @@ immutable: true mutex: '{{perimeter}}' import_format: - '{{perimeter}}' +exclude_import: true timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml index e5d15022dc31..0fbbe31b5a51 100644 --- a/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml @@ -15,6 +15,7 @@ --- name: 'ServicePerimeterIngressPolicy' description: | + Manage a single IngressPolicy in the status (enforced) configuration for a service perimeter. IngressPolicies match requests based on ingressFrom and ingressTo stanzas. For an ingress policy to match, both the ingressFrom and ingressTo stanzas must be matched. If an IngressPolicy matches a request, the request is allowed through the perimeter boundary from outside the perimeter. @@ -42,6 +43,7 @@ immutable: true mutex: '{{perimeter}}' import_format: - '{{perimeter}}' +exclude_import: true timeouts: insert_minutes: 20 update_minutes: 20 @@ -133,7 +135,10 @@ properties: description: | A Google Cloud resource that is allowed to ingress the perimeter. Requests from these resources will be allowed to access perimeter data. - Currently only projects are allowed. Format `projects/{project_number}` + Currently only projects and VPCs are allowed. 
+ Project format: `projects/{projectNumber}` + VPC network format: + `//compute.googleapis.com/projects/{PROJECT_ID}/global/networks/{NAME}`. The project may be in any Google Cloud organization, not just the organization that the perimeter is defined in. `*` is not allowed, the case of allowing all Google Cloud resources only is not supported. diff --git a/mmv1/products/activedirectory/go_Domain.yaml b/mmv1/products/activedirectory/go_Domain.yaml index 5e44da23b5f8..4be2e557fc74 100644 --- a/mmv1/products/activedirectory/go_Domain.yaml +++ b/mmv1/products/activedirectory/go_Domain.yaml @@ -36,6 +36,7 @@ timeouts: delete_minutes: 60 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' @@ -51,20 +52,9 @@ async: error: path: 'error' message: 'message' -virtual_fields: - - !ruby/object:Api::Type::Boolean - name: 'deletion_protection' - default_value: true - description: | - Whether Terraform will be prevented from destroying the domain. Defaults to true. - When a`terraform destroy` or `terraform apply` would delete the domain, - the command will fail if this field is not set to false in Terraform state. - When the field is set to true or unset in Terraform state, a `terraform apply` - or `terraform destroy` that would delete the domain will fail. - When the field is set to false, deleting the domain is allowed. custom_code: - custom_import: 'templates/terraform/custom_import/go/self_link_as_name.tmpl' pre_delete: 'templates/terraform/pre_delete/go/active_directory_domain.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/self_link_as_name.tmpl' error_abort_predicates: - 'transport_tpg.Is429QuotaError' @@ -74,9 +64,20 @@ examples: vars: name: 'myorg' domain_name: 'tfgen' - skip_test: true ignore_read_extra: - 'deletion_protection' + skip_test: true +virtual_fields: + - name: 'deletion_protection' + description: | + Whether Terraform will be prevented from destroying the domain. 
Defaults to true. + When a`terraform destroy` or `terraform apply` would delete the domain, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the domain will fail. + When the field is set to false, deleting the domain is allowed. + type: Boolean + default_value: true parameters: - name: 'domainName' type: String @@ -98,7 +99,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Resource labels that can contain user-provided metadata' - immutable: false - name: 'authorizedNetworks' type: Array description: | @@ -128,7 +128,7 @@ properties: The name of delegated administrator account used to perform Active Directory operations. If not specified, setupadmin will be used. immutable: true - default_value: setupadmin + default_value: "setupadmin" - name: 'fqdn' type: String description: | diff --git a/mmv1/products/activedirectory/go_DomainTrust.yaml b/mmv1/products/activedirectory/go_DomainTrust.yaml index 5540fc424a36..fc6ff2e0846e 100644 --- a/mmv1/products/activedirectory/go_DomainTrust.yaml +++ b/mmv1/products/activedirectory/go_DomainTrust.yaml @@ -37,6 +37,7 @@ timeouts: delete_minutes: 20 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' diff --git a/mmv1/products/activedirectory/go_Peering.yaml b/mmv1/products/activedirectory/go_Peering.yaml index 97a81343d131..7b5d44244b55 100644 --- a/mmv1/products/activedirectory/go_Peering.yaml +++ b/mmv1/products/activedirectory/go_Peering.yaml @@ -36,6 +36,7 @@ timeouts: delete_minutes: 20 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' @@ -78,7 +79,6 @@ properties: type: KeyValueLabels description: 'Resource labels that can contain user-provided metadata' min_version: 'beta' - immutable: false - name: 'authorizedNetwork' 
type: String description: | diff --git a/mmv1/products/alloydb/go_Backup.yaml b/mmv1/products/alloydb/go_Backup.yaml index c5ad9794c13b..6685eade1fbc 100644 --- a/mmv1/products/alloydb/go_Backup.yaml +++ b/mmv1/products/alloydb/go_Backup.yaml @@ -143,7 +143,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the alloydb backup. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'state' type: String description: Output only. The current state of the backup. diff --git a/mmv1/products/alloydb/go_Cluster.yaml b/mmv1/products/alloydb/go_Cluster.yaml index 7c067f2458ea..8bda78a9fd3a 100644 --- a/mmv1/products/alloydb/go_Cluster.yaml +++ b/mmv1/products/alloydb/go_Cluster.yaml @@ -145,7 +145,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'User-defined labels for the alloydb cluster.' - immutable: false - name: 'encryptionConfig' type: NestedObject description: | @@ -217,19 +216,6 @@ properties: output: true item_type: type: String - - name: 'network' - type: String - description: | - The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: - - "projects/{projectNumber}/global/networks/{network_id}". - default_from_api: true - exactly_one_of: - - 'network' - - 'network_config.0.network' - - 'psc_config.0.psc_enabled' - diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' - deprecation_message: '`network` is deprecated and will be removed in a future major release. Instead, use `network_config` to define the network configuration.' - name: 'networkConfig' type: NestedObject description: | @@ -242,7 +228,6 @@ properties: The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. 
It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}". exactly_one_of: - - 'network' - 'network_config.0.network' - 'psc_config.0.psc_enabled' diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' @@ -569,3 +554,33 @@ properties: type: Integer description: | Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. + - name: 'subscriptionType' + type: Enum + description: | + The subscription type of cluster. + default_from_api: true + enum_values: + - 'TRIAL' + - 'STANDARD' + - name: 'trialMetadata' + type: NestedObject + description: | + Contains information and all metadata related to TRIAL clusters. + output: true + properties: + - name: 'startTime' + type: String + description: | + Start time of the trial cluster. + - name: 'endTime' + type: String + description: | + End time of the trial cluster. + - name: 'upgradeTime' + type: String + description: | + Upgrade time of the trial cluster to standard cluster. + - name: 'graceEndTime' + type: String + description: | + Grace end time of the trial cluster. diff --git a/mmv1/products/alloydb/go_Instance.yaml b/mmv1/products/alloydb/go_Instance.yaml index 531c73834079..a4a37eca0763 100644 --- a/mmv1/products/alloydb/go_Instance.yaml +++ b/mmv1/products/alloydb/go_Instance.yaml @@ -107,6 +107,16 @@ examples: - 'reconciling' - 'update_time' skip_docs: true + - name: 'alloydb_instance_psc_test' + primary_resource_id: 'default' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true parameters: - name: 'cluster' type: ResourceRef @@ -149,7 +159,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'User-defined labels for the alloydb instance.' 
- immutable: false - name: 'annotations' type: KeyValueAnnotations description: @@ -238,8 +247,8 @@ properties: - name: 'observabilityConfig' type: NestedObject description: 'Configuration for enhanced query insights.' + min_version: 'beta' default_from_api: true - min_version: beta properties: - name: 'enabled' type: Boolean @@ -311,6 +320,7 @@ properties: type: NestedObject description: | Configuration for Private Service Connect (PSC) for the instance. + default_from_api: true properties: - name: 'serviceAttachmentLink' type: String diff --git a/mmv1/products/apigateway/go_Api.yaml b/mmv1/products/apigateway/go_Api.yaml index 97832cc6be8d..1222360c0879 100644 --- a/mmv1/products/apigateway/go_Api.yaml +++ b/mmv1/products/apigateway/go_Api.yaml @@ -112,4 +112,3 @@ properties: description: | Resource labels to represent user-provided metadata. min_version: 'beta' - immutable: false diff --git a/mmv1/products/apigateway/go_ApiConfig.yaml b/mmv1/products/apigateway/go_ApiConfig.yaml index 353500ec8445..9feafb296052 100644 --- a/mmv1/products/apigateway/go_ApiConfig.yaml +++ b/mmv1/products/apigateway/go_ApiConfig.yaml @@ -139,7 +139,6 @@ properties: description: | Resource labels to represent user-provided metadata. min_version: 'beta' - immutable: false - name: 'gatewayConfig' type: NestedObject description: | diff --git a/mmv1/products/apigateway/go_Gateway.yaml b/mmv1/products/apigateway/go_Gateway.yaml index ad642df0cd63..db8dccc8072c 100644 --- a/mmv1/products/apigateway/go_Gateway.yaml +++ b/mmv1/products/apigateway/go_Gateway.yaml @@ -129,4 +129,3 @@ properties: description: | Resource labels to represent user-provided metadata. 
min_version: 'beta' - immutable: false diff --git a/mmv1/products/appengine/go_FlexibleAppVersion.yaml b/mmv1/products/appengine/go_FlexibleAppVersion.yaml index da141620af6c..4a4707ce7e40 100644 --- a/mmv1/products/appengine/go_FlexibleAppVersion.yaml +++ b/mmv1/products/appengine/go_FlexibleAppVersion.yaml @@ -45,7 +45,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 @@ -76,6 +76,7 @@ examples: ignore_read_extra: - 'noop_on_destroy' - 'deployment.0.zip' + skip_test: true virtual_fields: - name: 'noop_on_destroy' description: | @@ -557,6 +558,7 @@ properties: key_description: | name of file value_type: + name: files type: NestedObject properties: - name: 'sha1Sum' diff --git a/mmv1/products/appengine/go_ServiceNetworkSettings.yaml b/mmv1/products/appengine/go_ServiceNetworkSettings.yaml index bcb415b7f01a..cb98cfd59850 100644 --- a/mmv1/products/appengine/go_ServiceNetworkSettings.yaml +++ b/mmv1/products/appengine/go_ServiceNetworkSettings.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/appengine/go_ServiceSplitTraffic.yaml b/mmv1/products/appengine/go_ServiceSplitTraffic.yaml index 64267de811d0..9413ab42b709 100644 --- a/mmv1/products/appengine/go_ServiceSplitTraffic.yaml +++ b/mmv1/products/appengine/go_ServiceSplitTraffic.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/appengine/go_StandardAppVersion.yaml b/mmv1/products/appengine/go_StandardAppVersion.yaml index 
132fece11508..93a5f71fadb9 100644 --- a/mmv1/products/appengine/go_StandardAppVersion.yaml +++ b/mmv1/products/appengine/go_StandardAppVersion.yaml @@ -43,7 +43,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'appengine#operation' path: 'name' wait_ms: 1000 @@ -289,6 +289,7 @@ properties: key_description: | name of file value_type: + name: files type: NestedObject properties: - name: 'sha1Sum' diff --git a/mmv1/products/apphub/Application.yaml b/mmv1/products/apphub/Application.yaml index 1a6d163c6e16..94f0ec009493 100644 --- a/mmv1/products/apphub/Application.yaml +++ b/mmv1/products/apphub/Application.yaml @@ -56,7 +56,7 @@ examples: vars: application_id: "example-application" display_name: "Application Full" - description: 'Application for testing' + desc: 'Application for testing' business_name: "Alice" business_email: "alice@google.com" developer_name: "Bob" diff --git a/mmv1/products/apphub/Service.yaml b/mmv1/products/apphub/Service.yaml index 8249a4dbae52..84e3c408c1a2 100644 --- a/mmv1/products/apphub/Service.yaml +++ b/mmv1/products/apphub/Service.yaml @@ -66,7 +66,7 @@ examples: application_id: "example-application-1" service_project_attachment_id: "project-1" display_name: "Example Service Full" - description: 'Register service for testing' + desc: 'Register service for testing' business_name: "Alice" business_email: "alice@google.com" developer_name: "Bob" diff --git a/mmv1/products/apphub/Workload.yaml b/mmv1/products/apphub/Workload.yaml index 28ee7b2cb6af..42b9a9d80d24 100644 --- a/mmv1/products/apphub/Workload.yaml +++ b/mmv1/products/apphub/Workload.yaml @@ -45,7 +45,7 @@ examples: application_id: "example-application-1" service_project_attachment_id: "project-1" display_name: "Example Service Full" - description: 'Register service for testing' + desc: 'Register service for testing' business_name: "Alice" business_email: 
"alice@google.com" developer_name: "Bob" diff --git a/mmv1/products/apphub/go_Application.yaml b/mmv1/products/apphub/go_Application.yaml index a8536f9ad262..241e2b4cc1f5 100644 --- a/mmv1/products/apphub/go_Application.yaml +++ b/mmv1/products/apphub/go_Application.yaml @@ -56,7 +56,7 @@ examples: vars: application_id: 'example-application' display_name: 'Application Full' - description: 'Application for testing' + desc: 'Application for testing' business_name: 'Alice' business_email: 'alice@google.com' developer_name: 'Bob' diff --git a/mmv1/products/apphub/go_Service.yaml b/mmv1/products/apphub/go_Service.yaml index 4c87d342df09..c821f75d5cec 100644 --- a/mmv1/products/apphub/go_Service.yaml +++ b/mmv1/products/apphub/go_Service.yaml @@ -65,7 +65,7 @@ examples: application_id: 'example-application-1' service_project_attachment_id: 'project-1' display_name: 'Example Service Full' - description: 'Register service for testing' + desc: 'Register service for testing' business_name: 'Alice' business_email: 'alice@google.com' developer_name: 'Bob' diff --git a/mmv1/products/apphub/go_Workload.yaml b/mmv1/products/apphub/go_Workload.yaml index c7038b28cfa2..b020d2925e0c 100644 --- a/mmv1/products/apphub/go_Workload.yaml +++ b/mmv1/products/apphub/go_Workload.yaml @@ -64,7 +64,7 @@ examples: application_id: 'example-application-1' service_project_attachment_id: 'project-1' display_name: 'Example Service Full' - description: 'Register service for testing' + desc: 'Register service for testing' business_name: 'Alice' business_email: 'alice@google.com' developer_name: 'Bob' diff --git a/mmv1/products/artifactregistry/Repository.yaml b/mmv1/products/artifactregistry/Repository.yaml index 1ffd201923a8..573c53133848 100644 --- a/mmv1/products/artifactregistry/Repository.yaml +++ b/mmv1/products/artifactregistry/Repository.yaml @@ -52,13 +52,13 @@ examples: ])" vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - 
!ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_docker' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_cmek' primary_resource_id: 'my-repo' @@ -72,34 +72,34 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example virtual docker repository' + desc: 'example virtual docker repository' upstream_repository_id: 'my-repository-upstream' - upstream_description: 'example docker repository (upstream source)' + upstream_desc: 'example docker repository (upstream source)' upstream_policy_id: 'my-repository-upstream' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example remote docker repository' + desc: 'example remote docker repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote_apt' primary_resource_id: 'my-repo' vars: repository_id: 'debian-buster' - description: 'example remote apt repository' + desc: 'example remote apt repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote_yum' primary_resource_id: 'my-repo' vars: repository_id: 'rocky-9' - description: 'example remote yum repository' + desc: 'example remote yum repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_cleanup' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository with cleanup policies' + desc: 'example docker repository with cleanup policies' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_remote_dockerhub_auth' primary_resource_id: 'my-repo' @@ -109,7 +109,7 @@ examples: - 
'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-dockerhub-remote' - description: 'example remote dockerhub repository with credentials' + desc: 'example remote dockerhub repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -123,7 +123,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-docker-custom-remote' - description: 'example remote custom docker repository with credentials' + desc: 'example remote custom docker repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -137,7 +137,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-maven-custom-remote' - description: 'example remote custom maven repository with credentials' + desc: 'example remote custom maven repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -151,7 +151,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-npm-custom-remote' - description: 'example remote custom npm repository with credentials' + desc: 'example remote custom npm repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -165,7 +165,7 @@ examples: - 'remote_repository_config.0.disable_upstream_validation' vars: repository_id: 'example-python-custom-remote' - description: 'example remote custom python repository with credentials' + desc: 'example remote custom python repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' diff --git a/mmv1/products/artifactregistry/go_Repository.yaml b/mmv1/products/artifactregistry/go_Repository.yaml index 
92eb331afea0..02159eb7daae 100644 --- a/mmv1/products/artifactregistry/go_Repository.yaml +++ b/mmv1/products/artifactregistry/go_Repository.yaml @@ -67,12 +67,12 @@ examples: primary_resource_name: 'fmt.Sprintf("tf-test-my-repository%s", context["random_suffix"])' vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - name: 'artifact_registry_repository_docker' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository' + desc: 'example docker repository' - name: 'artifact_registry_repository_cmek' primary_resource_id: 'my-repo' vars: @@ -84,35 +84,35 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example virtual docker repository' + desc: 'example virtual docker repository' upstream_repository_id: 'my-repository-upstream' - upstream_description: 'example docker repository (upstream source)' + upstream_desc: 'example docker repository (upstream source)' upstream_policy_id: 'my-repository-upstream' - name: 'artifact_registry_repository_remote' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example remote docker repository' + desc: 'example remote docker repository' - name: 'artifact_registry_repository_remote_apt' primary_resource_id: 'my-repo' vars: repository_id: 'debian-buster' - description: 'example remote apt repository' + desc: 'example remote apt repository' - name: 'artifact_registry_repository_remote_yum' primary_resource_id: 'my-repo' vars: repository_id: 'rocky-9' - description: 'example remote yum repository' + desc: 'example remote yum repository' - name: 'artifact_registry_repository_cleanup' primary_resource_id: 'my-repo' vars: repository_id: 'my-repository' - description: 'example docker repository with cleanup policies' + desc: 'example docker repository with cleanup policies' - name: 'artifact_registry_repository_remote_dockerhub_auth' 
primary_resource_id: 'my-repo' vars: repository_id: 'example-dockerhub-remote' - description: 'example remote dockerhub repository with credentials' + desc: 'example remote dockerhub repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -123,7 +123,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-docker-custom-remote' - description: 'example remote custom docker repository with credentials' + desc: 'example remote custom docker repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -134,7 +134,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-maven-custom-remote' - description: 'example remote custom maven repository with credentials' + desc: 'example remote custom maven repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -145,7 +145,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-npm-custom-remote' - description: 'example remote custom npm repository with credentials' + desc: 'example remote custom npm repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -156,7 +156,7 @@ examples: primary_resource_id: 'my-repo' vars: repository_id: 'example-python-custom-remote' - description: 'example remote custom python repository with credentials' + desc: 'example remote custom python repository with credentials' secret_id: 'example-secret' secret_resource_id: 'example-remote-secret' username: 'remote-username' @@ -212,7 +212,6 @@ properties: longer than 63 characters. Label keys must begin with a lowercase letter and may only contain lowercase letters, numeric characters, underscores, and dashes. 
- immutable: false - name: 'kmsKeyName' type: String description: |- @@ -314,6 +313,7 @@ properties: key_description: |- The policy ID. Must be unique within a repository. value_type: + name: cleanupPolicies type: NestedObject properties: - name: 'action' diff --git a/mmv1/products/backupdr/BackupVault.yaml b/mmv1/products/backupdr/BackupVault.yaml index 253d6a3dfc6b..b8e3df6e5c0d 100644 --- a/mmv1/products/backupdr/BackupVault.yaml +++ b/mmv1/products/backupdr/BackupVault.yaml @@ -1,3 +1,16 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + --- !ruby/object:Api::Resource base_url: projects/{{project}}/locations/{{location}}/backupVaults create_url: projects/{{project}}/locations/{{location}}/backupVaults?backupVaultId={{backup_vault_id}} @@ -10,6 +23,26 @@ import_format: name: BackupVault description: Container to store and organize immutable and indelible backups. 
autogen_async: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: name + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: + result: !ruby/object:Api::OpAsync::Result + path: response + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: done + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: error + message: message +update_verb: :PATCH +update_mask: true examples: - !ruby/object:Provider::Terraform::Examples min_version: beta @@ -19,6 +52,42 @@ examples: backup_vault_id: 'backup-vault-test' test_env_vars: project: :PROJECT_NAME +parameters: + - !ruby/object:Api::Type::String + name: location + description: "The GCP location for the backup vault. " + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: backupVaultId + description: "Required. ID of the requesting object." + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::Boolean + name: 'force_update' + default_value: false + url_param_only: true + description: | + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + - !ruby/object:Api::Type::Boolean + name: 'force_delete' + default_value: false + url_param_only: true + description: | + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. 
+ - !ruby/object:Api::Type::Boolean + name: 'allow_missing' + default_value: false + url_param_only: true + description: | + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. properties: - !ruby/object:Api::Type::String name: name @@ -56,8 +125,8 @@ properties: simultaneous updates from overwiting each other. " - !ruby/object:Api::Type::String name: state - description: "Output only. The BackupVault resource instance state. \n Possible - values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR" + description: "Output only. The BackupVault resource instance state. \n + Possible values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR" output: true - !ruby/object:Api::Type::String name: effectiveTime @@ -85,59 +154,3 @@ properties: name: annotations description: "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores small amounts of arbitrary data. " -parameters: - - !ruby/object:Api::Type::String - name: location - description: "The GCP location for the backup vault. " - url_param_only: true - required: true - immutable: true - - !ruby/object:Api::Type::String - name: backupVaultId - description: "Required. ID of the requesting object." - url_param_only: true - required: true - immutable: true - - !ruby/object:Api::Type::Boolean - name: 'force_update' - default_value: false - url_param_only: true - description: | - If set, allow update to extend the minimum enforced retention for backup vault. This overrides - the restriction against conflicting retention periods. This conflict may occur when the - expiration schedule defined by the associated backup plan is shorter than the minimum - retention set by the backup vault. 
- - !ruby/object:Api::Type::Boolean - name: 'force_delete' - default_value: false - url_param_only: true - description: | - If set, the following restrictions against deletion of the backup vault instance can be overridden: - * deletion of a backup vault instance containing no backups, but still containing empty datasources. - * deletion of a backup vault instance that is being referenced by an active backup plan. - - !ruby/object:Api::Type::Boolean - name: 'allow_missing' - default_value: false - url_param_only: true - description: | - Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. -async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - path: name - base_url: "{{op_id}}" - wait_ms: 1000 - timeouts: - result: !ruby/object:Api::OpAsync::Result - path: response - resource_inside_response: true - status: !ruby/object:Api::OpAsync::Status - path: done - complete: true - allowed: - - true - - false - error: !ruby/object:Api::OpAsync::Error - path: error - message: message -update_verb: :PATCH -update_mask: true diff --git a/mmv1/products/backupdr/go_BackupVault.yaml b/mmv1/products/backupdr/go_BackupVault.yaml new file mode 100644 index 000000000000..2270f1b45c9d --- /dev/null +++ b/mmv1/products/backupdr/go_BackupVault.yaml @@ -0,0 +1,178 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'BackupVault' +description: Container to store and organize immutable and indelible backups. +min_version: 'beta' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}' +base_url: 'projects/{{project}}/locations/{{location}}/backupVaults' +self_link: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/backupVaults?backupVaultId={{backup_vault_id}}' +update_url: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}?force={{force_update}}' +update_verb: 'PATCH' +update_mask: true +delete_url: 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}?force={{force_delete}}&allowMissing={{allow_missing}}' +import_format: + - 'projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'backup_dr_backup_vault_full' + primary_resource_id: 'backup-vault-test' + min_version: 'beta' + vars: + backup_vault_id: 'backup-vault-test' + test_env_vars: + project: 'PROJECT_NAME' +parameters: + - name: 'location' + type: String + description: "The GCP location for the backup vault. " + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'backupVaultId' + type: String + description: "Required. ID of the requesting object." 
+ min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'force_update' + type: Boolean + description: | + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + min_version: 'beta' + url_param_only: true + default_value: false + - name: 'force_delete' + type: Boolean + description: | + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + min_version: 'beta' + url_param_only: true + default_value: false + - name: 'allow_missing' + type: Boolean + description: | + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + min_version: 'beta' + url_param_only: true + default_value: false +properties: + - name: 'name' + type: String + description: 'Output only. Identifier. The resource name. ' + min_version: 'beta' + output: true + - name: 'description' + type: String + description: 'Optional. The description of the BackupVault instance (2048 characters + or less). ' + min_version: 'beta' + - name: 'labels' + type: KeyValueLabels + description: "Optional. Resource labels to represent user provided metadata. " + min_version: 'beta' + - name: 'createTime' + type: String + description: 'Output only. The time when the instance was created. ' + min_version: 'beta' + output: true + - name: 'updateTime' + type: String + description: 'Output only. The time when the instance was updated. 
' + min_version: 'beta' + output: true + - name: 'backupMinimumEnforcedRetentionDuration' + type: String + description: "Required. The default and minimum enforced retention for each backup + within the backup vault. The enforced retention for each backup can be extended. " + min_version: 'beta' + required: true + - name: 'deletable' + type: Boolean + description: 'Output only. Set to true when there are no backups nested under this + resource. ' + min_version: 'beta' + output: true + - name: 'etag' + type: String + description: "Optional. Server specified ETag for the backup vault resource to prevent + simultaneous updates from overwiting each other. " + min_version: 'beta' + output: true + - name: 'state' + type: String + description: "Output only. The BackupVault resource instance state. \n + Possible values:\n STATE_UNSPECIFIED\n CREATING\n ACTIVE\n DELETING\n ERROR" + min_version: 'beta' + output: true + - name: 'effectiveTime' + type: String + description: 'Optional. Time after which the BackupVault resource is locked. ' + min_version: 'beta' + - name: 'backupCount' + type: String + description: 'Output only. The number of backups in this backup vault. ' + min_version: 'beta' + output: true + - name: 'serviceAccount' + type: String + description: "Output only. Service account used by the BackupVault Service for this + BackupVault. The user should grant this account permissions in their workload + project to enable the service to run backups and restores there. " + min_version: 'beta' + output: true + - name: 'totalStoredBytes' + type: String + description: 'Output only. Total size of the storage used by all backup resources. ' + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: "Output only. Output only Immutable after resource creation until + resource deletion. " + min_version: 'beta' + output: true + - name: 'annotations' + type: KeyValueAnnotations + description: "Optional. User annotations. 
See https://google.aip.dev/128#annotations\nStores + small amounts of arbitrary data. " + min_version: 'beta' diff --git a/mmv1/products/backupdr/go_product.yaml b/mmv1/products/backupdr/go_product.yaml index 5a35bae35181..c2a50b6b6705 100644 --- a/mmv1/products/backupdr/go_product.yaml +++ b/mmv1/products/backupdr/go_product.yaml @@ -14,7 +14,7 @@ # Warning: This is a temporary file, and should not be edited directly --- name: 'BackupDR' -display_name: 'Backup and DR' +display_name: 'Backup and DR Service' versions: - name: 'beta' base_url: 'https://backupdr.googleapis.com/v1/' diff --git a/mmv1/products/beyondcorp/go_AppConnection.yaml b/mmv1/products/beyondcorp/go_AppConnection.yaml index 1c244f6ece44..287bed2319fa 100644 --- a/mmv1/products/beyondcorp/go_AppConnection.yaml +++ b/mmv1/products/beyondcorp/go_AppConnection.yaml @@ -92,7 +92,6 @@ properties: type: KeyValueLabels description: | Resource labels to represent user provided metadata. - immutable: false - name: 'type' type: String description: | diff --git a/mmv1/products/beyondcorp/go_AppConnector.yaml b/mmv1/products/beyondcorp/go_AppConnector.yaml index 32fa325f82a0..1e322033d474 100644 --- a/mmv1/products/beyondcorp/go_AppConnector.yaml +++ b/mmv1/products/beyondcorp/go_AppConnector.yaml @@ -86,7 +86,6 @@ properties: type: KeyValueLabels description: | Resource labels to represent user provided metadata. - immutable: false - name: 'principalInfo' type: NestedObject description: | diff --git a/mmv1/products/beyondcorp/go_AppGateway.yaml b/mmv1/products/beyondcorp/go_AppGateway.yaml index 87c0b3a542e0..ce6b17482dd0 100644 --- a/mmv1/products/beyondcorp/go_AppGateway.yaml +++ b/mmv1/products/beyondcorp/go_AppGateway.yaml @@ -104,7 +104,6 @@ properties: type: KeyValueLabels description: | Resource labels to represent user provided metadata. 
- immutable: false - name: 'state' type: Enum description: | diff --git a/mmv1/products/bigquery/go_DatasetAccess.yaml b/mmv1/products/bigquery/go_DatasetAccess.yaml index b126307ef693..735c90460b51 100644 --- a/mmv1/products/bigquery/go_DatasetAccess.yaml +++ b/mmv1/products/bigquery/go_DatasetAccess.yaml @@ -130,6 +130,7 @@ properties: - 'dataset' - 'routine' diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + custom_expand: 'templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl' - name: 'groupByEmail' type: String description: An email address of a Google Group to grant access to. @@ -143,6 +144,7 @@ properties: - 'dataset' - 'routine' diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + custom_expand: 'templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl' - name: 'domain' type: String description: | diff --git a/mmv1/products/bigquery/go_Table.yaml b/mmv1/products/bigquery/go_Table.yaml index f45fc5b3e9d2..0dc0faf20302 100644 --- a/mmv1/products/bigquery/go_Table.yaml +++ b/mmv1/products/bigquery/go_Table.yaml @@ -35,7 +35,6 @@ iam_policy: fetch_iam_policy_verb: 'POST' allowed_iam_role: 'roles/bigquery.dataOwner' parent_resource_attribute: 'table_id' - iam_conditions_request_type: 'REQUEST_BODY' example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' iam_policy_version: '1' custom_code: @@ -46,12 +45,6 @@ examples: vars: dataset_id: 'dataset_id' table_id: 'table_id' -virtual_fields: - - name: 'allow_resource_tags_on_deletion' - description: | - If set to true, it allows table deletion when there are still resource tags attached. 
- type: Boolean - default_value: false parameters: - name: 'dataset' type: String @@ -333,6 +326,7 @@ properties: - 'ORC' - 'PARQUET' - 'ICEBERG' + - 'DELTA_LAKE' - name: 'sourceUris' type: Array description: | diff --git a/mmv1/products/bigqueryanalyticshub/DataExchange.yaml b/mmv1/products/bigqueryanalyticshub/DataExchange.yaml index c74a33e466e0..4727ea1df30d 100644 --- a/mmv1/products/bigqueryanalyticshub/DataExchange.yaml +++ b/mmv1/products/bigqueryanalyticshub/DataExchange.yaml @@ -48,7 +48,7 @@ examples: region_override: 'US' vars: data_exchange_id: 'my_data_exchange' - description: 'example data exchange' + desc: 'example data exchange' - !ruby/object:Provider::Terraform::Examples name: 'bigquery_analyticshub_data_exchange_dcr' primary_resource_id: 'data_exchange' @@ -58,7 +58,7 @@ examples: region_override: 'US' vars: data_exchange_id: 'dcr_data_exchange' - description: 'example dcr data exchange' + desc: 'example dcr data exchange' properties: - !ruby/object:Api::Type::String name: name diff --git a/mmv1/products/bigqueryanalyticshub/Listing.yaml b/mmv1/products/bigqueryanalyticshub/Listing.yaml index 03b0206f834a..b5342de37dc7 100644 --- a/mmv1/products/bigqueryanalyticshub/Listing.yaml +++ b/mmv1/products/bigqueryanalyticshub/Listing.yaml @@ -52,7 +52,7 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' - !ruby/object:Provider::Terraform::Examples name: 'bigquery_analyticshub_listing_restricted' primary_resource_id: 'listing' @@ -65,7 +65,7 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' - !ruby/object:Provider::Terraform::Examples name: 'bigquery_analyticshub_listing_dcr' primary_resource_id: 'listing' @@ -78,7 +78,7 @@ examples: vars: data_exchange_id: 'dcr_data_exchange' listing_id: 'dcr_listing' - description: 'example dcr data exchange' + desc: 
'example dcr data exchange' properties: - !ruby/object:Api::Type::String name: name diff --git a/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml b/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml index 0a1a3312ab59..207306765337 100644 --- a/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml +++ b/mmv1/products/bigqueryanalyticshub/go_DataExchange.yaml @@ -49,7 +49,14 @@ examples: region_override: 'US' vars: data_exchange_id: 'my_data_exchange' - description: 'example data exchange' + desc: 'example data exchange' + - name: 'bigquery_analyticshub_data_exchange_dcr' + primary_resource_id: 'data_exchange' + primary_resource_name: 'fmt.Sprintf("tf_test_my_data_exchange%s", context["random_suffix"])' + region_override: 'US' + vars: + data_exchange_id: 'dcr_data_exchange' + desc: 'example dcr data exchange' parameters: properties: - name: 'name' @@ -98,3 +105,35 @@ properties: type: String description: |- Base64 encoded image representing the data exchange. + - name: 'sharingEnvironmentConfig' + type: NestedObject + description: | + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + immutable: true + default_from_api: true + properties: + - name: 'defaultExchangeConfig' + type: NestedObject + description: | + Default Analytics Hub data exchange, used for secured data sharing. + immutable: true + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'sharing_environment_config.0.default_exchange_config' + - 'sharing_environment_config.0.dcr_exchange_config' + properties: + [] + - name: 'dcrExchangeConfig' + type: NestedObject + description: | + Data Clean Room (DCR), used for privacy-safe and secured data sharing. 
+ immutable: true + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'sharing_environment_config.0.default_exchange_config' + - 'sharing_environment_config.0.dcr_exchange_config' + properties: + [] diff --git a/mmv1/products/bigqueryanalyticshub/go_Listing.yaml b/mmv1/products/bigqueryanalyticshub/go_Listing.yaml index 47161cd592f2..a88cb74158f5 100644 --- a/mmv1/products/bigqueryanalyticshub/go_Listing.yaml +++ b/mmv1/products/bigqueryanalyticshub/go_Listing.yaml @@ -50,7 +50,7 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' - name: 'bigquery_analyticshub_listing_restricted' primary_resource_id: 'listing' primary_resource_name: 'fmt.Sprintf("tf_test_my_data_exchange%s", context["random_suffix"]), fmt.Sprintf("tf_test_my_listing%s", context["random_suffix"])' @@ -58,7 +58,15 @@ examples: vars: data_exchange_id: 'my_data_exchange' listing_id: 'my_listing' - description: 'example data exchange' + desc: 'example data exchange' + - name: 'bigquery_analyticshub_listing_dcr' + primary_resource_id: 'listing' + primary_resource_name: 'fmt.Sprintf("tf_test_my_data_exchange%s", context["random_suffix"]), fmt.Sprintf("tf_test_my_listing%s", context["random_suffix"])' + region_override: 'US' + vars: + data_exchange_id: 'dcr_data_exchange' + listing_id: 'dcr_listing' + desc: 'example dcr data exchange' parameters: properties: - name: 'name' @@ -146,6 +154,7 @@ properties: type: NestedObject description: Shared dataset i.e. BigQuery dataset source. required: true + immutable: true properties: - name: 'dataset' type: String @@ -153,7 +162,21 @@ properties: Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 required: true + immutable: true diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - name: 'selectedResources' + type: Array + description: Resource in this dataset that is selectively shared. 
This field is required for data clean room exchanges. + immutable: true + item_type: + type: NestedObject + properties: + - name: 'table' + type: String + description: | + Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + immutable: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' - name: 'restrictedExportConfig' type: NestedObject description: If set, restricted export configuration will be propagated and enforced on the linked dataset. @@ -162,6 +185,11 @@ properties: type: Boolean description: If true, enable restricted export. + - name: 'restrictDirectTableAccess' + type: Boolean + description: + If true, restrict direct table access(read api/tabledata.list) on linked table. + output: true - name: 'restrictQueryResult' type: Boolean description: diff --git a/mmv1/products/bigqueryanalyticshub/go_product.yaml b/mmv1/products/bigqueryanalyticshub/go_product.yaml index 3873b7aa7338..c8f3297c4891 100644 --- a/mmv1/products/bigqueryanalyticshub/go_product.yaml +++ b/mmv1/products/bigqueryanalyticshub/go_product.yaml @@ -17,7 +17,7 @@ name: 'BigqueryAnalyticsHub' display_name: 'Bigquery Analytics Hub' versions: - name: 'beta' - base_url: 'https://analyticshub.googleapis.com/v1beta1/' + base_url: 'https://analyticshub.googleapis.com/v1/' - name: 'ga' base_url: 'https://analyticshub.googleapis.com/v1/' scopes: diff --git a/mmv1/products/bigquerydatatransfer/go_Config.yaml b/mmv1/products/bigquerydatatransfer/go_Config.yaml index 0e086afe91b4..c66fafb6ccd8 100644 --- a/mmv1/products/bigquerydatatransfer/go_Config.yaml +++ b/mmv1/products/bigquerydatatransfer/go_Config.yaml @@ -53,6 +53,19 @@ examples: display_name: 'my-query' dataset_id: 'my_dataset' skip_test: true + - name: 'bigquerydatatransfer_config_cmek' + primary_resource_id: 'query_config_cmek' + vars: + dataset_id: 'example_dataset' + key_name: 'example-key' + keyring_name: 'example-keyring' + 
skip_test: true + - name: 'bigquerydatatransfer_config_salesforce' + primary_resource_id: 'salesforce_config' + vars: + display_name: 'my-salesforce-config' + dataset_id: 'my_dataset' + skip_test: true parameters: - name: 'location' type: String @@ -172,6 +185,16 @@ properties: reingests data for [today-10, today-1], rather than ingesting data for just [today-1]. Only valid if the data source supports the feature. Set the value to 0 to use the default value. + - name: 'encryptionConfiguration' + type: NestedObject + description: | + Represents the encryption configuration for a transfer. + properties: + - name: 'kmsKeyName' + type: String + description: | + The name of the KMS key used for encrypting BigQuery data. + required: true - name: 'disabled' type: Boolean description: | diff --git a/mmv1/products/bigqueryreservation/go_Reservation.yaml b/mmv1/products/bigqueryreservation/go_Reservation.yaml index ba0d8f27d598..ea773b411be5 100644 --- a/mmv1/products/bigqueryreservation/go_Reservation.yaml +++ b/mmv1/products/bigqueryreservation/go_Reservation.yaml @@ -71,11 +71,6 @@ properties: description: | Maximum number of queries that are allowed to run concurrently in this reservation. This is a soft limit due to asynchronous nature of the system and various optimizations for small queries. Default value is 0 which means that concurrency will be automatically set based on the reservation size. default_value: 0 - - name: 'multiRegionAuxiliary' - type: Boolean - description: | - Applicable only for reservations located within one of the BigQuery multi-regions (US or EU). - If set to true, this reservation is placed in the organization's secondary region which is designated for disaster recovery purposes. If false, this reservation is placed in the organization's default region. 
- name: 'edition' type: String description: | diff --git a/mmv1/products/billing/go_ProjectInfo.yaml b/mmv1/products/billing/go_ProjectInfo.yaml index e837e0beb82e..9741a10ec7c6 100644 --- a/mmv1/products/billing/go_ProjectInfo.yaml +++ b/mmv1/products/billing/go_ProjectInfo.yaml @@ -21,7 +21,7 @@ references: 'Enable, disable, or change billing for a project': 'https://cloud.google.com/billing/docs/how-to/modify-project' api: 'https://cloud.google.com/billing/docs/reference/rest/v1/projects' docs: -id_format: 'projects/{{project}}/billingInfo' +id_format: 'projects/{{project}}' base_url: 'projects/{{project}}/billingInfo' create_verb: 'PUT' delete_verb: 'PUT' diff --git a/mmv1/products/binaryauthorization/go_Policy.yaml b/mmv1/products/binaryauthorization/go_Policy.yaml index 4a3aa59f4f5f..c8a9530d02cd 100644 --- a/mmv1/products/binaryauthorization/go_Policy.yaml +++ b/mmv1/products/binaryauthorization/go_Policy.yaml @@ -120,6 +120,7 @@ properties: } key_name: 'cluster' value_type: + name: clusterAdmissionRule type: NestedObject properties: - name: 'evaluationMode' diff --git a/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml b/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml index e7708589cd91..52a335f99759 100644 --- a/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml +++ b/mmv1/products/blockchainnodeengine/go_BlockchainNodes.yaml @@ -88,7 +88,6 @@ properties: type: KeyValueLabels description: | User-provided key-value pairs - immutable: false - name: 'connectionInfo' type: NestedObject description: | diff --git a/mmv1/products/certificatemanager/go_Certificate.yaml b/mmv1/products/certificatemanager/go_Certificate.yaml index 708c706fb4ca..8450def41b28 100644 --- a/mmv1/products/certificatemanager/go_Certificate.yaml +++ b/mmv1/products/certificatemanager/go_Certificate.yaml @@ -117,7 +117,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the Certificate resource.' 
- immutable: false - name: 'scope' type: String description: | diff --git a/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml b/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml index 83105994c483..db1ec306dab8 100644 --- a/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml +++ b/mmv1/products/certificatemanager/go_CertificateIssuanceConfig.yaml @@ -115,7 +115,6 @@ properties: description: | 'Set of label tags associated with the CertificateIssuanceConfig resource. An object containing a list of "key": value pairs. Example: { "name": "wrench", "count": "3" }. - immutable: false - name: 'certificateAuthorityConfig' type: NestedObject description: | diff --git a/mmv1/products/certificatemanager/go_CertificateMap.yaml b/mmv1/products/certificatemanager/go_CertificateMap.yaml index 0d5419e4040a..48d4ac7d859b 100644 --- a/mmv1/products/certificatemanager/go_CertificateMap.yaml +++ b/mmv1/products/certificatemanager/go_CertificateMap.yaml @@ -81,7 +81,6 @@ properties: type: KeyValueLabels description: | Set of labels associated with a Certificate Map resource. - immutable: false - name: 'gclbTargets' type: Array description: | diff --git a/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml b/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml index 87075965dd9a..94217838ffd0 100644 --- a/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml +++ b/mmv1/products/certificatemanager/go_CertificateMapEntry.yaml @@ -98,7 +98,6 @@ properties: Set of labels associated with a Certificate Map Entry. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
- immutable: false - name: 'certificates' type: Array description: | diff --git a/mmv1/products/certificatemanager/go_DnsAuthorization.yaml b/mmv1/products/certificatemanager/go_DnsAuthorization.yaml index 8c17d205e5b3..8923a5e2ed55 100644 --- a/mmv1/products/certificatemanager/go_DnsAuthorization.yaml +++ b/mmv1/products/certificatemanager/go_DnsAuthorization.yaml @@ -84,7 +84,6 @@ properties: type: KeyValueLabels description: 'Set of label tags associated with the DNS Authorization resource.' - immutable: false - name: 'domain' type: String description: | diff --git a/mmv1/products/certificatemanager/go_TrustConfig.yaml b/mmv1/products/certificatemanager/go_TrustConfig.yaml index 5357f70d30bf..f811963ba005 100644 --- a/mmv1/products/certificatemanager/go_TrustConfig.yaml +++ b/mmv1/products/certificatemanager/go_TrustConfig.yaml @@ -91,7 +91,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the trust config.' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/clouddeploy/go_Automation.yaml b/mmv1/products/clouddeploy/go_Automation.yaml index c32ee0bc28d9..d7dadd6404b5 100644 --- a/mmv1/products/clouddeploy/go_Automation.yaml +++ b/mmv1/products/clouddeploy/go_Automation.yaml @@ -104,7 +104,6 @@ properties: - name: 'labels' type: KeyValueLabels description: "Optional. Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 63 characters." - immutable: false - name: 'etag' type: String description: "Optional. 
The weak etag of the `Automation` resource. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding." diff --git a/mmv1/products/clouddeploy/go_CustomTargetType.yaml b/mmv1/products/clouddeploy/go_CustomTargetType.yaml index 183e1e1bbc12..33e61223005b 100644 --- a/mmv1/products/clouddeploy/go_CustomTargetType.yaml +++ b/mmv1/products/clouddeploy/go_CustomTargetType.yaml @@ -109,7 +109,6 @@ properties: - name: 'labels' type: KeyValueLabels description: "Labels are attributes that can be set and used by both the user and by Cloud Deploy. Labels must meet the following constraints: * Keys and values can contain only lowercase letters, numeric characters, underscores, and dashes. * All characters must use UTF-8 encoding, and international characters are allowed. * Keys must start with a lowercase letter or international character. * Each resource is limited to a maximum of 64 labels. Both keys and values are additionally constrained to be <= 128 bytes." - immutable: false - name: 'createTime' type: String description: "Time at which the `CustomTargetType` was created." diff --git a/mmv1/products/clouddomains/go_Registration.yaml b/mmv1/products/clouddomains/go_Registration.yaml index 611824e3c475..d74db92ecbbb 100644 --- a/mmv1/products/clouddomains/go_Registration.yaml +++ b/mmv1/products/clouddomains/go_Registration.yaml @@ -95,7 +95,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Set of labels associated with the Registration. - immutable: false - name: 'domainNotices' type: Array description: The list of domain notices that you acknowledge. 
Possible value is HSTS_PRELOADED diff --git a/mmv1/products/cloudfunctions/go_CloudFunction.yaml b/mmv1/products/cloudfunctions/go_CloudFunction.yaml index e63aab6647ef..15d0e8a3df5a 100644 --- a/mmv1/products/cloudfunctions/go_CloudFunction.yaml +++ b/mmv1/products/cloudfunctions/go_CloudFunction.yaml @@ -135,7 +135,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs associated with this Cloud Function. - immutable: false - name: 'environmentVariables' type: KeyValuePairs description: | diff --git a/mmv1/products/cloudfunctions2/go_Function.yaml b/mmv1/products/cloudfunctions2/go_Function.yaml index de2ae6bae4b6..002d0be758d7 100644 --- a/mmv1/products/cloudfunctions2/go_Function.yaml +++ b/mmv1/products/cloudfunctions2/go_Function.yaml @@ -376,6 +376,7 @@ properties: Google Cloud Storage generation for the object. If the generation is omitted, the latest generation will be used. default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl' - name: 'repoSource' type: NestedObject description: @@ -709,7 +710,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs associated with this Cloud Function. - immutable: false - name: 'kmsKeyName' type: String description: | diff --git a/mmv1/products/cloudquotas/go_QuotaPreference.yaml b/mmv1/products/cloudquotas/go_QuotaPreference.yaml index 2e6b5d5fe675..2a48249d5ca1 100644 --- a/mmv1/products/cloudquotas/go_QuotaPreference.yaml +++ b/mmv1/products/cloudquotas/go_QuotaPreference.yaml @@ -106,6 +106,7 @@ properties: description: | The trace id that the Google Cloud uses to provision the requested quota. This trace id may be used by the client to contact Cloud support to track the state of a quota preference request. The trace id is only produced for increase requests and is unique for each request. The quota decrease requests do not have a trace id. 
output: true + custom_expand: 'templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl' - name: 'annotations' type: KeyValuePairs description: |- diff --git a/mmv1/products/cloudrunv2/go_Job.yaml b/mmv1/products/cloudrunv2/go_Job.yaml index ed2b78d7a96f..77160e111161 100644 --- a/mmv1/products/cloudrunv2/go_Job.yaml +++ b/mmv1/products/cloudrunv2/go_Job.yaml @@ -55,6 +55,7 @@ iam_policy: - 'projects/{{project}}/locations/{{location}}/jobs/{{name}}' - '{{name}}' custom_code: + pre_delete: 'templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl' taint_resource_on_failed_create: true examples: - name: 'cloudrunv2_job_basic' @@ -62,10 +63,14 @@ examples: primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_limits' primary_resource_id: 'default' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_sql' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -76,6 +81,8 @@ examples: deletion_protection: 'true' test_vars_overrides: 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_vpcaccess' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -84,29 +91,50 @@ examples: vpc_access_connector_name: 'run-vpc' vpc_compute_subnetwork_name: 'run-subnetwork' compute_network_name: 'run-network' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_directvpc' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_secret' primary_resource_id: 'default' 
primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_job_name: 'cloudrun-job' secret_id: 'secret' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_emptydir' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' min_version: 'beta' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_job_run_job' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' min_version: 'beta' vars: cloud_run_job_name: 'cloudrun-job' + ignore_read_extra: + - 'deletion_protection' +virtual_fields: + - name: 'deletion_protection' + description: | + Whether Terraform will be prevented from destroying the job. Defaults to true. + When a`terraform destroy` or `terraform apply` would delete the job, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the job will fail. + When the field is set to false, deleting the job is allowed. + type: Boolean + default_value: true parameters: - name: 'location' type: String @@ -143,7 +171,6 @@ properties: Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Job. - immutable: false - name: 'annotations' type: KeyValueAnnotations description: |- @@ -220,6 +247,14 @@ properties: type: Boolean description: | If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + conflicts: + - policy + - name: 'policy' + type: String + description: | + The path to a binary authorization policy. 
Format: projects/{project}/platforms/cloudRun/{policy-name} + conflicts: + - use_default - name: 'startExecutionToken' type: String description: |- @@ -302,13 +337,14 @@ properties: - name: 'args' type: Array description: |- - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. item_type: type: String - name: 'env' type: Array description: |- List of environment variables to set in the container. + is_set: true item_type: type: NestedObject properties: @@ -320,7 +356,7 @@ properties: - name: 'value' type: String description: |- - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes + Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. 
# exactly_one_of: # - template.0.template.0.containers.0.env.0.value # - template.0.template.0.containers.0.env.0.valueSource diff --git a/mmv1/products/cloudrunv2/go_Service.yaml b/mmv1/products/cloudrunv2/go_Service.yaml index 84222388d0b0..c33947cb0a89 100644 --- a/mmv1/products/cloudrunv2/go_Service.yaml +++ b/mmv1/products/cloudrunv2/go_Service.yaml @@ -55,6 +55,7 @@ iam_policy: - 'projects/{{project}}/locations/{{location}}/services/{{name}}' - '{{name}}' custom_code: + pre_delete: 'templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl' taint_resource_on_failed_create: true examples: - name: 'cloudrunv2_service_basic' @@ -62,10 +63,14 @@ examples: primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_limits' primary_resource_id: 'default' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_sql' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -76,6 +81,8 @@ examples: deletion_protection: 'true' test_vars_overrides: 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_vpcaccess' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' @@ -84,38 +91,65 @@ examples: vpc_access_connector_name: 'run-vpc' vpc_compute_subnetwork_name: 'run-subnetwork' compute_network_name: 'run-network' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_directvpc' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_probes' 
primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_secret' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' secret_id: 'secret-1' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_multicontainer' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service%s", context["random_suffix"])' min_version: 'beta' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' - name: 'cloudrunv2_service_mount_gcs' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service-%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' + skip_vcr: true - name: 'cloudrunv2_service_mount_nfs' primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service-%s", context["random_suffix"])' vars: cloud_run_service_name: 'cloudrun-service' + ignore_read_extra: + - 'deletion_protection' + skip_vcr: true +virtual_fields: + - name: 'deletion_protection' + description: | + Whether Terraform will be prevented from destroying the service. Defaults to true. + When a`terraform destroy` or `terraform apply` would delete the service, + the command will fail if this field is not set to false in Terraform state. + When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the service will fail. + When the field is set to false, deleting the service is allowed. 
+ type: Boolean + default_value: true parameters: - name: 'location' type: String @@ -156,7 +190,6 @@ properties: Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. - immutable: false - name: 'annotations' type: KeyValueAnnotations description: |- @@ -242,6 +275,14 @@ properties: type: Boolean description: | If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + conflicts: + - policy + - name: 'policy' + type: String + description: | + The path to a binary authorization policy. Format: projects/{project}/platforms/cloudRun/{policy-name} + conflicts: + - use_default - name: 'customAudiences' type: Array description: | @@ -388,25 +429,26 @@ properties: - name: 'args' type: Array description: |- - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references are not supported in Cloud Run. item_type: type: String - name: 'env' type: Array description: |- List of environment variables to set in the container. + is_set: true item_type: type: NestedObject properties: - name: 'name' type: String description: |- - Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters. 
+ Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters. required: true - name: 'value' type: String description: |- - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes + Literal value of the environment variable. Defaults to "" and the maximum allowed length is 32768 characters. Variable references are not supported in Cloud Run. # exactly_one_of: # - template.0.containers.0.env.0.value # - template.0.containers.0.env.0.valueSource @@ -497,7 +539,6 @@ properties: type: NestedObject description: |- Periodic probe of container liveness. Container will be restarted if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - default_from_api: true properties: - name: 'initialDelaySeconds' type: Integer diff --git a/mmv1/products/cloudtasks/go_Queue.yaml b/mmv1/products/cloudtasks/go_Queue.yaml index 2b2fde3f626e..870d58be1384 100644 --- a/mmv1/products/cloudtasks/go_Queue.yaml +++ b/mmv1/products/cloudtasks/go_Queue.yaml @@ -49,6 +49,14 @@ examples: - 'app_engine_routing_override.0.service' - 'app_engine_routing_override.0.version' - 'app_engine_routing_override.0.instance' + - name: 'cloud_tasks_queue_http_target_oidc' + primary_resource_id: 'http_target_oidc' + vars: + name: 'cloud-tasks-queue-http-target-oidc' + - name: 'cloud_tasks_queue_http_target_oauth' + primary_resource_id: 'http_target_oauth' + vars: + name: 'cloud-tasks-queue-http-target-oauth' parameters: - name: 'location' type: String @@ -199,3 +207,168 @@ properties: This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is the default and means that no operations are logged. required: true + - name: 'httpTarget' + type: NestedObject + description: Modifies HTTP target for HTTP tasks. + properties: + - name: 'httpMethod' + type: Enum + description: | + The HTTP method to use for the request. + + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + default_from_api: true + enum_values: + - 'HTTP_METHOD_UNSPECIFIED' + - 'POST' + - 'GET' + - 'HEAD' + - 'PUT' + - 'DELETE' + - 'PATCH' + - 'OPTIONS' + - name: 'uriOverride' + type: NestedObject + description: | + URI override. + + When specified, overrides the execution URI for all the tasks in the queue. + properties: + - name: 'scheme' + type: Enum + description: | + Scheme override. + + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). 
+ default_from_api: true + enum_values: + - 'HTTP' + - 'HTTPS' + - name: 'host' + type: String + description: | + Host override. + + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + - name: 'port' + type: String + description: | + Port override. + + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + - name: 'pathOverride' + type: NestedObject + description: | + URI path. + + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + properties: + - name: 'path' + type: String + description: The URI path (e.g., /users/1234). Default is an empty string. + default_from_api: true + - name: 'queryOverride' + type: NestedObject + description: | + URI query. + + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + properties: + - name: 'queryParams' + type: String + description: The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + default_from_api: true + - name: 'uriOverrideEnforceMode' + type: Enum + description: | + URI Override Enforce Mode + + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + default_from_api: true + enum_values: + - 'ALWAYS' + - 'IF_NOT_EXISTS' + - name: 'headerOverrides' + type: Array + description: | + HTTP target headers. + + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. 
+ + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + item_type: + type: NestedObject + properties: + - name: 'header' + type: NestedObject + description: | + Header embodying a key and a value. + required: true + properties: + - name: 'key' + type: String + description: The Key of the header. + required: true + - name: 'value' + type: String + description: The Value of the header. + required: true + - name: 'oauthToken' + type: NestedObject + description: | + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + conflicts: + - oidcToken + properties: + - name: 'serviceAccountEmail' + type: String + description: | + Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + required: true + - name: 'scope' + type: String + description: | + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + default_from_api: true + - name: 'oidcToken' + type: NestedObject + description: | + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. 
+ + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + conflicts: + - oauthToken + properties: + - name: 'serviceAccountEmail' + type: String + description: | + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + required: true + - name: 'audience' + type: String + description: | + Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + default_from_api: true diff --git a/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml b/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml index 3d1da0e594ff..3fbf966eb003 100644 --- a/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml +++ b/mmv1/products/composer/go_UserWorkloadsConfigMap.yaml @@ -28,6 +28,7 @@ timeouts: insert_minutes: 1 update_minutes: 1 delete_minutes: 1 +custom_code: examples: - name: 'composer_user_workloads_config_map_basic' primary_resource_id: 'config_map' diff --git a/mmv1/products/compute/go_Address.yaml b/mmv1/products/compute/go_Address.yaml index 644e8c27d57d..7c55a613f836 100644 --- a/mmv1/products/compute/go_Address.yaml +++ b/mmv1/products/compute/go_Address.yaml @@ -46,7 +46,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Autoscaler.yaml b/mmv1/products/compute/go_Autoscaler.yaml index 58af3f1aae8b..d34abc20405f 100644 --- a/mmv1/products/compute/go_Autoscaler.yaml +++ b/mmv1/products/compute/go_Autoscaler.yaml @@ -37,7 +37,7 @@ 
async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_BackendBucket.yaml b/mmv1/products/compute/go_BackendBucket.yaml index 2cd35c63275c..d65a522c5c3c 100644 --- a/mmv1/products/compute/go_BackendBucket.yaml +++ b/mmv1/products/compute/go_BackendBucket.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml index 3c98d9c4729e..f7a24f705d2d 100644 --- a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_BackendService.yaml b/mmv1/products/compute/go_BackendService.yaml index 0ff6dc96a582..7bc25a46fb42 100644 --- a/mmv1/products/compute/go_BackendService.yaml +++ b/mmv1/products/compute/go_BackendService.yaml @@ -37,7 +37,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -150,8 +150,6 @@ properties: See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) for an explanation of load balancing modes. - - From version 6.0.0 default value will be UTILIZATION to match default GCP value. 
default_value: "UTILIZATION" enum_values: - 'UTILIZATION' @@ -746,16 +744,18 @@ properties: description: Settings for enabling Cloud Identity Aware Proxy send_empty_value: true properties: + - name: 'enabled' + type: Boolean + description: Whether the serving infrastructure will authenticate and authorize all incoming requests. + required: true - name: 'oauth2ClientId' type: String description: | OAuth2 Client ID for IAP - required: true - name: 'oauth2ClientSecret' type: String description: | OAuth2 Client Secret for IAP - required: true ignore_read: true sensitive: true send_empty_value: true @@ -960,9 +960,6 @@ properties: Settings controlling eviction of unhealthy hosts from the load balancing pool. Applicable backend service types can be a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. - - From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. - Default values are enforce by GCP without providing them. 
properties: - name: 'baseEjectionTime' type: NestedObject @@ -1013,7 +1010,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'consecutiveGatewayFailure' type: Integer description: | @@ -1032,7 +1028,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'enforcingConsecutiveErrors' type: Integer description: | @@ -1051,7 +1046,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'enforcingConsecutiveGatewayFailure' type: Integer description: | @@ -1070,7 +1064,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 0 - name: 'enforcingSuccessRate' type: Integer description: | @@ -1089,7 +1082,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'interval' type: NestedObject description: | @@ -1137,7 +1129,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 10 - name: 'successRateMinimumHosts' type: Integer description: | @@ -1157,7 +1148,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'successRateRequestVolume' type: Integer description: | @@ -1178,7 +1168,6 @@ properties: - 
'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'successRateStdevFactor' type: Integer description: | @@ -1201,7 +1190,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 1900 - name: 'portName' type: String description: | diff --git a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml index 6878e0000f40..5b6ed7d594b0 100644 --- a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Disk.yaml b/mmv1/products/compute/go_Disk.yaml index 0c05ca647a61..dbd62234755c 100644 --- a/mmv1/products/compute/go_Disk.yaml +++ b/mmv1/products/compute/go_Disk.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml index b5e5fa203277..744b21f80c6b 100644 --- a/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml +++ b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' 
wait_ms: 1000 diff --git a/mmv1/products/compute/go_ExternalVpnGateway.yaml b/mmv1/products/compute/go_ExternalVpnGateway.yaml index c27de4033e84..763cc50878f6 100644 --- a/mmv1/products/compute/go_ExternalVpnGateway.yaml +++ b/mmv1/products/compute/go_ExternalVpnGateway.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Firewall.yaml b/mmv1/products/compute/go_Firewall.yaml index 6deaee15a1ae..0b36c5309d57 100644 --- a/mmv1/products/compute/go_Firewall.yaml +++ b/mmv1/products/compute/go_Firewall.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -113,7 +113,7 @@ properties: either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. item_type: type: String @@ -153,7 +153,7 @@ properties: either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. item_type: type: String diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index c1976d6981b4..e3a13970e4c2 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -19,6 +19,7 @@ description: | A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, portRange] tuple. 
+skip_attribution_label: true references: guides: 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules' @@ -35,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -47,8 +48,8 @@ async: message: 'message' collection_url_key: 'items' custom_code: - pre_create: 'templates/terraform/pre_create/go/compute_forwarding_rule.go.tmpl' constants: 'templates/terraform/constants/go/compute_forwarding_rule.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/compute_forwarding_rule.go.tmpl' post_create: 'templates/terraform/post_create/go/labels.tmpl' custom_diff: - 'forwardingRuleCustomizeDiff' @@ -243,6 +244,12 @@ properties: This can only be set to true for load balancers that have their `loadBalancingScheme` set to `INTERNAL`. + - name: 'forwardingRuleId' + type: Integer + description: | + The unique identifier number for the resource. This identifier is defined by the server. + api_name: id + output: true - name: 'pscConnectionId' type: String description: 'The PSC connection id of the PSC Forwarding Rule.' diff --git a/mmv1/products/compute/go_GlobalAddress.yaml b/mmv1/products/compute/go_GlobalAddress.yaml index aa0d9774991b..056511a5f12d 100644 --- a/mmv1/products/compute/go_GlobalAddress.yaml +++ b/mmv1/products/compute/go_GlobalAddress.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -99,7 +99,6 @@ properties: description: | The fingerprint used for optimistic locking of this resource. Used internally during updates. 
- min_version: 'beta' output: true update_url: 'projects/{{project}}/global/addresses/{{name}}/setLabels' update_verb: 'POST' diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml index 26e3be4ee14f..708e9064d70e 100644 --- a/mmv1/products/compute/go_GlobalForwardingRule.yaml +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -22,6 +22,7 @@ description: | balancing. For more information, see https://cloud.google.com/compute/docs/load-balancing/http/ +skip_attribution_label: true docs: base_url: 'projects/{{project}}/global/forwardingRules' has_self_link: true @@ -34,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml index f6b78196a286..6971b7160623 100644 --- a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml index cbcc8744fea8..fe76b39a76f3 100644 --- a/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_HaVpnGateway.yaml 
b/mmv1/products/compute/go_HaVpnGateway.yaml index 3cb96b3356cd..ec6ec91a0fa3 100644 --- a/mmv1/products/compute/go_HaVpnGateway.yaml +++ b/mmv1/products/compute/go_HaVpnGateway.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_HealthCheck.yaml b/mmv1/products/compute/go_HealthCheck.yaml index f2ea88f02cba..43ef731d0ada 100644 --- a/mmv1/products/compute/go_HealthCheck.yaml +++ b/mmv1/products/compute/go_HealthCheck.yaml @@ -46,7 +46,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -116,6 +116,18 @@ examples: min_version: 'beta' vars: health_check_name: 'tcp-health-check' + - name: 'compute_health_check_http_source_regions' + primary_resource_id: 'http-health-check-with-source-regions' + vars: + health_check_name: 'http-health-check' + - name: 'compute_health_check_https_source_regions' + primary_resource_id: 'https-health-check-with-source-regions' + vars: + health_check_name: 'https-health-check' + - name: 'compute_health_check_tcp_source_regions' + primary_resource_id: 'tcp-health-check-with-source-regions' + vars: + health_check_name: 'tcp-health-check' parameters: properties: - name: 'checkIntervalSec' @@ -179,7 +191,6 @@ properties: * The health check cannot be used with BackendService nor with managed instance group auto-healing. 
- min_version: 'beta' item_type: type: String min_size: 3 diff --git a/mmv1/products/compute/go_HttpHealthCheck.yaml b/mmv1/products/compute/go_HttpHealthCheck.yaml index 7ee7bc77a0d7..e88b8fa0d9a8 100644 --- a/mmv1/products/compute/go_HttpHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpHealthCheck.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_HttpsHealthCheck.yaml b/mmv1/products/compute/go_HttpsHealthCheck.yaml index 34adda55888c..26ae15e47605 100644 --- a/mmv1/products/compute/go_HttpsHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpsHealthCheck.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Image.yaml b/mmv1/products/compute/go_Image.yaml index 86a701c6a04b..a69df4bbe8b3 100644 --- a/mmv1/products/compute/go_Image.yaml +++ b/mmv1/products/compute/go_Image.yaml @@ -48,7 +48,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Instance.yaml b/mmv1/products/compute/go_Instance.yaml index ddaf6f6021bc..5d2eba3bf9d1 100644 --- a/mmv1/products/compute/go_Instance.yaml +++ b/mmv1/products/compute/go_Instance.yaml @@ -30,7 +30,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -608,14 +608,16 @@ properties: type: Enum description: | The 
confidential computing technology the instance uses. - SEV is an AMD feature. One of the following values: SEV, SEV_SNP. - If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required. TDX is only available in beta. at_least_one_of: - 'confidential_instance_config.0.enable_confidential_compute' - 'confidential_instance_config.0.confidential_instance_type' enum_values: - 'SEV' - 'SEV_SNP' + - 'TDX' - name: 'status' type: Enum description: | diff --git a/mmv1/products/compute/go_InstanceGroup.yaml b/mmv1/products/compute/go_InstanceGroup.yaml index 48e0fa36a110..f647746e873c 100644 --- a/mmv1/products/compute/go_InstanceGroup.yaml +++ b/mmv1/products/compute/go_InstanceGroup.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceGroupManager.yaml b/mmv1/products/compute/go_InstanceGroupManager.yaml index 3a52e01f41f3..395d3d79857c 100644 --- a/mmv1/products/compute/go_InstanceGroupManager.yaml +++ b/mmv1/products/compute/go_InstanceGroupManager.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceGroupMembership.yaml b/mmv1/products/compute/go_InstanceGroupMembership.yaml index fd910f82c8d2..8b9c31172f23 100644 --- a/mmv1/products/compute/go_InstanceGroupMembership.yaml +++ b/mmv1/products/compute/go_InstanceGroupMembership.yaml @@ -50,7 +50,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - 
base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml index fd1d5514a0e6..b5f75c41bb05 100644 --- a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml +++ b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml @@ -42,7 +42,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_InstanceSettings.yaml b/mmv1/products/compute/go_InstanceSettings.yaml index 3069656e3b43..a249f423c9ad 100644 --- a/mmv1/products/compute/go_InstanceSettings.yaml +++ b/mmv1/products/compute/go_InstanceSettings.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Interconnect.yaml b/mmv1/products/compute/go_Interconnect.yaml index abc890c5a1bf..a43e4f6f9a88 100644 --- a/mmv1/products/compute/go_Interconnect.yaml +++ b/mmv1/products/compute/go_Interconnect.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 10000 @@ -379,16 +379,18 @@ properties: - name: 'requestedFeatures' type: Array description: | - interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + interconnects.list of features requested for this Interconnect connection. 
Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. item_type: type: Enum description: | interconnects.list of features requested for this Interconnect connection enum_values: - 'MACSEC' + - 'IF_MACSEC' - name: 'availableFeatures' type: Array description: | @@ -398,8 +400,4 @@ properties: ports and MACsec isn't supported and enabling MACsec fails). output: true item_type: - type: Enum - description: | - interconnects.list of features available for this Interconnect connection, - enum_values: - - 'MACSEC' + type: String diff --git a/mmv1/products/compute/go_InterconnectAttachment.yaml b/mmv1/products/compute/go_InterconnectAttachment.yaml index 8ec6cc6738ee..34e9be6dac7a 100644 --- a/mmv1/products/compute/go_InterconnectAttachment.yaml +++ b/mmv1/products/compute/go_InterconnectAttachment.yaml @@ -30,7 +30,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_MachineImage.yaml b/mmv1/products/compute/go_MachineImage.yaml index 5f9a605e6787..0eaa8c794e23 100644 --- a/mmv1/products/compute/go_MachineImage.yaml +++ b/mmv1/products/compute/go_MachineImage.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ManagedSslCertificate.yaml b/mmv1/products/compute/go_ManagedSslCertificate.yaml index 
a83f40296587..18c4f8028e3e 100644 --- a/mmv1/products/compute/go_ManagedSslCertificate.yaml +++ b/mmv1/products/compute/go_ManagedSslCertificate.yaml @@ -51,7 +51,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -96,7 +96,6 @@ properties: type: Integer description: 'The unique identifier for the resource.' api_name: id - default_from_api: true output: true - name: 'name' type: String diff --git a/mmv1/products/compute/go_Network.yaml b/mmv1/products/compute/go_Network.yaml index cec97ad33d59..690cdd17dc0c 100644 --- a/mmv1/products/compute/go_Network.yaml +++ b/mmv1/products/compute/go_Network.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml index 2a79092c8698..f186199aba04 100644 --- a/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml +++ b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEndpoint.yaml b/mmv1/products/compute/go_NetworkEndpoint.yaml index abe4429c67f6..861d6acbb722 100644 --- a/mmv1/products/compute/go_NetworkEndpoint.yaml +++ b/mmv1/products/compute/go_NetworkEndpoint.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 
'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEndpointGroup.yaml b/mmv1/products/compute/go_NetworkEndpointGroup.yaml index c5d3c85558e8..12a7f6adfbb3 100644 --- a/mmv1/products/compute/go_NetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_NetworkEndpointGroup.yaml @@ -46,7 +46,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkEndpoints.yaml b/mmv1/products/compute/go_NetworkEndpoints.yaml index 7242e983788f..0320bffeef75 100644 --- a/mmv1/products/compute/go_NetworkEndpoints.yaml +++ b/mmv1/products/compute/go_NetworkEndpoints.yaml @@ -33,7 +33,7 @@ references: 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/' api: 'https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups' docs: -id_format: '{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints' +id_format: '{{project}}/{{zone}}/{{network_endpoint_group}}' base_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}' self_link: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints' create_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints' @@ -53,7 +53,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkFirewallPolicy.yaml b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml index 6324ccdf3966..feb6e205653a 100644 --- a/mmv1/products/compute/go_NetworkFirewallPolicy.yaml +++ 
b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml @@ -28,7 +28,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml index bfc331e0f42d..588b1897eea6 100644 --- a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml +++ b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml @@ -43,7 +43,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -79,6 +79,7 @@ examples: deletion_protection: 'true' test_vars_overrides: 'deletion_protection': 'false' + skip_vcr: true parameters: - name: 'network' type: ResourceRef diff --git a/mmv1/products/compute/go_NodeGroup.yaml b/mmv1/products/compute/go_NodeGroup.yaml index 16df58318d41..c16e81b24363 100644 --- a/mmv1/products/compute/go_NodeGroup.yaml +++ b/mmv1/products/compute/go_NodeGroup.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_NodeTemplate.yaml b/mmv1/products/compute/go_NodeTemplate.yaml index ae6d1d82a6ad..d6a061d91ec1 100644 --- a/mmv1/products/compute/go_NodeTemplate.yaml +++ b/mmv1/products/compute/go_NodeTemplate.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -56,6 +56,10 @@ examples: primary_resource_id: 'template' 
vars: template_name: 'soletenant-with-licenses' + - name: 'node_template_accelerators' + primary_resource_id: 'template' + vars: + template_name: 'soletenant-with-accelerators' parameters: - name: 'region' type: ResourceRef @@ -146,6 +150,24 @@ properties: enum_values: - 'RESTART_NODE_ON_ANY_SERVER' - 'RESTART_NODE_ON_MINIMAL_SERVERS' + - name: 'accelerators' + type: Array + description: | + List of the type and count of accelerator cards attached to the + node template + item_type: + type: NestedObject + properties: + - name: 'acceleratorCount' + type: Integer + description: | + The number of the guest accelerator cards exposed to this + node template. + - name: 'acceleratorType' + type: String + description: | + Full or partial URL of the accelerator type resource to expose + to this node template. - name: 'cpuOvercommitType' type: Enum description: | diff --git a/mmv1/products/compute/go_PacketMirroring.yaml b/mmv1/products/compute/go_PacketMirroring.yaml index 99ff103ff7f7..63a6871bda00 100644 --- a/mmv1/products/compute/go_PacketMirroring.yaml +++ b/mmv1/products/compute/go_PacketMirroring.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_PerInstanceConfig.yaml b/mmv1/products/compute/go_PerInstanceConfig.yaml index be33eac134ed..724932323c04 100644 --- a/mmv1/products/compute/go_PerInstanceConfig.yaml +++ b/mmv1/products/compute/go_PerInstanceConfig.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ProjectCloudArmorTier.yaml b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml index 
9b086511eed3..cb4daf09f4b2 100644 --- a/mmv1/products/compute/go_ProjectCloudArmorTier.yaml +++ b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml index 42558bd8a439..5cd3db293ebc 100644 --- a/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml +++ b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_PublicDelegatedPrefix.yaml b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml index 5144500c2ac1..8fbf38ddac9a 100644 --- a/mmv1/products/compute/go_PublicDelegatedPrefix.yaml +++ b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionAutoscaler.yaml b/mmv1/products/compute/go_RegionAutoscaler.yaml index 494945ff6369..da1746780c73 100644 --- a/mmv1/products/compute/go_RegionAutoscaler.yaml +++ b/mmv1/products/compute/go_RegionAutoscaler.yaml @@ -37,7 +37,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionBackendService.yaml 
b/mmv1/products/compute/go_RegionBackendService.yaml index f8b2e6a04482..33dc130adcd5 100644 --- a/mmv1/products/compute/go_RegionBackendService.yaml +++ b/mmv1/products/compute/go_RegionBackendService.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -149,9 +149,7 @@ properties: See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) for an explanation of load balancing modes. - - From version 6.0.0 default value will be UTILIZATION to match default GCP value. - default_value: "CONNECTION" + default_value: "UTILIZATION" enum_values: - 'UTILIZATION' - 'RATE' @@ -651,11 +649,9 @@ properties: description: | Time for which instance will be drained (not accept new connections, but still work to finish started). - - From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. api_name: drainingTimeoutSec send_empty_value: true - default_value: 0 + default_value: 300 - name: 'creationTimestamp' type: Time description: | @@ -752,16 +748,18 @@ properties: description: Settings for enabling Cloud Identity Aware Proxy send_empty_value: true properties: + - name: 'enabled' + type: Boolean + description: Whether the serving infrastructure will authenticate and authorize all incoming requests. + required: true - name: 'oauth2ClientId' type: String description: | OAuth2 Client ID for IAP - required: true - name: 'oauth2ClientSecret' type: String description: | OAuth2 Client Secret for IAP - required: true ignore_read: true sensitive: true send_empty_value: true @@ -869,9 +867,6 @@ properties: Settings controlling eviction of unhealthy hosts from the load balancing pool. 
This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2. - - From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. - Default values are enforce by GCP without providing them. properties: - name: 'baseEjectionTime' type: NestedObject @@ -922,7 +917,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'consecutiveGatewayFailure' type: Integer description: | @@ -941,7 +935,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'enforcingConsecutiveErrors' type: Integer description: | @@ -960,7 +953,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'enforcingConsecutiveGatewayFailure' type: Integer description: | @@ -979,7 +971,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 0 - name: 'enforcingSuccessRate' type: Integer description: | @@ -998,7 +989,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'interval' type: NestedObject description: | @@ -1046,7 +1036,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 10 - name: 'successRateMinimumHosts' type: Integer description: | @@ -1066,7 +1055,6 
@@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 5 - name: 'successRateRequestVolume' type: Integer description: | @@ -1087,7 +1075,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 100 - name: 'successRateStdevFactor' type: Integer description: | @@ -1109,7 +1096,6 @@ properties: - 'outlier_detection.0.success_rate_minimum_hosts' - 'outlier_detection.0.success_rate_request_volume' - 'outlier_detection.0.success_rate_stdev_factor' - default_value: 1900 - name: 'portName' type: String description: | diff --git a/mmv1/products/compute/go_RegionCommitment.yaml b/mmv1/products/compute/go_RegionCommitment.yaml index f01b2ff61484..083d0215efeb 100644 --- a/mmv1/products/compute/go_RegionCommitment.yaml +++ b/mmv1/products/compute/go_RegionCommitment.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionDisk.yaml b/mmv1/products/compute/go_RegionDisk.yaml index db2bc7196d50..7fac3d3a0c88 100644 --- a/mmv1/products/compute/go_RegionDisk.yaml +++ b/mmv1/products/compute/go_RegionDisk.yaml @@ -47,7 +47,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml index 1c41e05ebb19..f70f7da5fd89 100644 --- 
a/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml +++ b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionHealthCheck.yaml b/mmv1/products/compute/go_RegionHealthCheck.yaml index 62dcea119fdc..c53f7405f68e 100644 --- a/mmv1/products/compute/go_RegionHealthCheck.yaml +++ b/mmv1/products/compute/go_RegionHealthCheck.yaml @@ -42,7 +42,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionInstanceGroupManager.yaml b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml index 46c46ed67bca..ef3fb3c81954 100644 --- a/mmv1/products/compute/go_RegionInstanceGroupManager.yaml +++ b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml index 347a086a82bb..b8b956c01adf 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml @@ -43,7 +43,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git 
a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml index 6a57b76776d1..916e7fb82e2c 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml index e172f020417b..f727fdbc6f72 100644 --- a/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml +++ b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml @@ -28,7 +28,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml index e0e532997721..2da626974fda 100644 --- a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml +++ b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSecurityPolicy.yaml b/mmv1/products/compute/go_RegionSecurityPolicy.yaml index f6473c18f264..65d078bd284d 100644 --- a/mmv1/products/compute/go_RegionSecurityPolicy.yaml +++ b/mmv1/products/compute/go_RegionSecurityPolicy.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 
'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml index 370fb57bf869..a8ca5e9bfdbd 100644 --- a/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml +++ b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSslCertificate.yaml b/mmv1/products/compute/go_RegionSslCertificate.yaml index f3c5a78db137..6893009d4216 100644 --- a/mmv1/products/compute/go_RegionSslCertificate.yaml +++ b/mmv1/products/compute/go_RegionSslCertificate.yaml @@ -26,7 +26,13 @@ references: docs: optional_properties: | * `name_prefix` - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with `name`. + specified prefix. Conflicts with `name`. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. 
+ Resulting name for a `name_prefix` <= 37 characters: + `name_prefix` + YYYYmmddHHSSssss + 8 digit incremental counter + Resulting name for a `name_prefix` 38 - 54 characters: + `name_prefix` + YYmmdd + 3 digit incremental counter base_url: 'projects/{{project}}/regions/{{region}}/sslCertificates' has_self_link: true immutable: true @@ -38,7 +44,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionSslPolicy.yaml b/mmv1/products/compute/go_RegionSslPolicy.yaml index 25f9dabca134..baab545b92e2 100644 --- a/mmv1/products/compute/go_RegionSslPolicy.yaml +++ b/mmv1/products/compute/go_RegionSslPolicy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionTargetHttpProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml index e82dbaa539fe..d0c2c2295b89 100644 --- a/mmv1/products/compute/go_RegionTargetHttpProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml index f3e5b50a1ee3..3a870fb4eb03 100644 --- a/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 
'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -180,5 +180,14 @@ properties: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. + update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}' + update_verb: 'PATCH' + update_id: 'serverTlsPolicy' + fingerprint_name: 'fingerprint' resource: 'SslPolicy' imports: 'selfLink' diff --git a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml index b787d8fd3ae8..66f45937d3fd 100644 --- a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RegionUrlMap.yaml b/mmv1/products/compute/go_RegionUrlMap.yaml index afa052ed8e67..8804464c6851 100644 --- a/mmv1/products/compute/go_RegionUrlMap.yaml +++ b/mmv1/products/compute/go_RegionUrlMap.yaml @@ -29,7 +29,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Reservation.yaml b/mmv1/products/compute/go_Reservation.yaml index b24fab3c88b6..30401dcdd6c1 100644 --- 
a/mmv1/products/compute/go_Reservation.yaml +++ b/mmv1/products/compute/go_Reservation.yaml @@ -41,7 +41,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ResizeRequest.yaml b/mmv1/products/compute/go_ResizeRequest.yaml index 6a1c965eef58..0ef3a61c7849 100644 --- a/mmv1/products/compute/go_ResizeRequest.yaml +++ b/mmv1/products/compute/go_ResizeRequest.yaml @@ -37,7 +37,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ResourcePolicy.yaml b/mmv1/products/compute/go_ResourcePolicy.yaml index 38a7877c8d83..035ab0cc7644 100644 --- a/mmv1/products/compute/go_ResourcePolicy.yaml +++ b/mmv1/products/compute/go_ResourcePolicy.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -45,11 +45,17 @@ async: message: 'message' collection_url_key: 'items' custom_code: + constants: 'templates/terraform/constants/go/compute_resource_policy.go.tmpl' examples: - name: 'resource_policy_basic' primary_resource_id: 'foo' vars: name: 'gce-policy' + - name: 'resource_policy_hourly_format' + primary_resource_id: 'foo' + vars: + name: 'gce-policy' + skip_docs: true - name: 'resource_policy_full' primary_resource_id: 'bar' vars: @@ -138,6 +144,7 @@ properties: It must be in an hourly format "HH:MM", where HH : [00-23] and MM : [00] GMT. 
eg: 21:00 required: true + diff_suppress_func: 'HourlyFormatSuppressDiff' validation: function: 'verify.ValidateHourlyOnly' - name: 'dailySchedule' @@ -161,6 +168,7 @@ properties: 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. required: true + diff_suppress_func: 'HourlyFormatSuppressDiff' validation: function: 'verify.ValidateHourlyOnly' - name: 'weeklySchedule' diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml index af854f00bb37..089cc6ff29b9 100644 --- a/mmv1/products/compute/go_Route.yaml +++ b/mmv1/products/compute/go_Route.yaml @@ -61,7 +61,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Router.yaml b/mmv1/products/compute/go_Router.yaml index 530a6a8142c9..a444f3212099 100644 --- a/mmv1/products/compute/go_Router.yaml +++ b/mmv1/products/compute/go_Router.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RouterNat.yaml b/mmv1/products/compute/go_RouterNat.yaml index bd91cff799c6..fc97f73023bb 100644 --- a/mmv1/products/compute/go_RouterNat.yaml +++ b/mmv1/products/compute/go_RouterNat.yaml @@ -39,7 +39,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{regions}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_RouterRoutePolicy.yaml b/mmv1/products/compute/go_RouterRoutePolicy.yaml index afa5e8f73af3..67d22276db4d 100644 --- a/mmv1/products/compute/go_RouterRoutePolicy.yaml +++ 
b/mmv1/products/compute/go_RouterRoutePolicy.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{regions}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_SecurityPolicyRule.yaml b/mmv1/products/compute/go_SecurityPolicyRule.yaml index 38faecb63b84..cb2fd3bce77a 100644 --- a/mmv1/products/compute/go_SecurityPolicyRule.yaml +++ b/mmv1/products/compute/go_SecurityPolicyRule.yaml @@ -40,7 +40,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_ServiceAttachment.yaml b/mmv1/products/compute/go_ServiceAttachment.yaml index f35f6bb89b94..b10914454e7f 100644 --- a/mmv1/products/compute/go_ServiceAttachment.yaml +++ b/mmv1/products/compute/go_ServiceAttachment.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_SslCertificate.yaml b/mmv1/products/compute/go_SslCertificate.yaml index 0c12f39c56b0..913043468170 100644 --- a/mmv1/products/compute/go_SslCertificate.yaml +++ b/mmv1/products/compute/go_SslCertificate.yaml @@ -26,7 +26,13 @@ references: docs: optional_properties: | * `name_prefix` - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with `name`. + specified prefix. Conflicts with `name`. Max length is 54 characters. + Prefixes with lengths longer than 37 characters will use a shortened + UUID that will be more prone to collisions. 
+ Resulting name for a `name_prefix` <= 37 characters: + `name_prefix` + YYYYmmddHHSSssss + 8 digit incremental counter + Resulting name for a `name_prefix` 38 - 54 characters: + `name_prefix` + YYmmdd + 3 digit incremental counter base_url: 'projects/{{project}}/global/sslCertificates' has_self_link: true immutable: true @@ -38,7 +44,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_SslPolicy.yaml b/mmv1/products/compute/go_SslPolicy.yaml index fedccef00273..7ee4898da65e 100644 --- a/mmv1/products/compute/go_SslPolicy.yaml +++ b/mmv1/products/compute/go_SslPolicy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_Subnetwork.yaml b/mmv1/products/compute/go_Subnetwork.yaml index 288bb3b3429e..ed9e1d601931 100644 --- a/mmv1/products/compute/go_Subnetwork.yaml +++ b/mmv1/products/compute/go_Subnetwork.yaml @@ -55,7 +55,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -74,8 +74,10 @@ iam_policy: custom_code: extra_schema_entry: 'templates/terraform/extra_schema_entry/go/subnetwork.tmpl' constants: 'templates/terraform/constants/go/subnetwork.tmpl' + post_update: 'templates/terraform/post_update/go/compute_subnetwork.go.tmpl' custom_diff: - 'customdiff.ForceNewIfChange("ip_cidr_range", IsShrinkageIpCidr)' + - 'sendSecondaryIpRangeIfEmptyDiff' examples: - name: 'subnetwork_basic' primary_resource_id: 'network-with-private-secondary-ip-ranges' @@ -116,6 +118,28 @@ 
examples: vars: subnetwork_name: 'subnet-cidr-overlap' network_name: 'net-cidr-overlap' + - name: 'subnetwork_reserved_internal_range' + primary_resource_id: 'subnetwork-reserved-internal-range' + min_version: 'beta' + vars: + subnetwork_name: 'subnetwork-reserved-internal-range' + network_name: 'network-reserved-internal-range' + - name: 'subnetwork_reserved_secondary_range' + primary_resource_id: 'subnetwork-reserved-secondary-range' + min_version: 'beta' + vars: + subnetwork_name: 'subnetwork-reserved-secondary-range' + network_name: 'network-reserved-secondary-range' +virtual_fields: + - name: 'send_secondary_ip_range_if_empty' + description: | + Controls the removal behavior of secondary_ip_range. + When false, removing secondary_ip_range from config will not produce a diff as + the provider will default to the API's value. + When true, the provider will treat removing secondary_ip_range as sending an + empty list of secondary IP ranges to the API. + Defaults to false. + type: Boolean parameters: properties: - name: 'creationTimestamp' @@ -141,11 +165,20 @@ properties: Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. - required: true + Field is optional when `reserved_internal_range` is defined, otherwise required. + required: false + default_from_api: true update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/expandIpCidrRange' update_verb: 'POST' validation: function: 'verify.ValidateIpCidrRange' + - name: 'reservedInternalRange' + type: ResourceRef + description: | + The ID of the reserved internal range. Must be prefixed with `networkconnectivity.googleapis.com` + E.g. 
`networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}` + resource: 'InternalRange' + imports: 'selfLink' - name: 'name' type: String description: | @@ -205,13 +238,10 @@ properties: to either primary or secondary ranges. **Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid - breaking users during the 0.12 upgrade. To explicitly send a list - of zero objects you must use the following syntax: - `example=[]` - For more details about this behavior, see [this section](https://www.terraform.io/docs/configuration/attr-as-blocks.html#defining-a-fixed-object-collection-value). + breaking users during the 0.12 upgrade. To explicitly send a list of zero objects, + set `send_secondary_ip_range_if_empty = true` api_name: secondaryIpRanges unordered_list: true - schema_config_mode_attr: true default_from_api: true send_empty_value: true update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' @@ -238,9 +268,18 @@ properties: range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported. - required: true + Field is optional when `reserved_internal_range` is defined, otherwise required. + required: false + default_from_api: true validation: function: 'verify.ValidateIpCidrRange' + - name: 'reservedInternalRange' + type: ResourceRef + description: | + The ID of the reserved internal range. Must be prefixed with `networkconnectivity.googleapis.com` + E.g. 
`networkconnectivity.googleapis.com/projects/{project}/locations/global/internalRanges/{rangeId}` + resource: 'InternalRange' + imports: 'selfLink' - name: 'privateIpGoogleAccess' type: Boolean description: | diff --git a/mmv1/products/compute/go_TargetGrpcProxy.yaml b/mmv1/products/compute/go_TargetGrpcProxy.yaml index 74b096d7d4b5..d4f48f713af9 100644 --- a/mmv1/products/compute/go_TargetGrpcProxy.yaml +++ b/mmv1/products/compute/go_TargetGrpcProxy.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetHttpProxy.yaml b/mmv1/products/compute/go_TargetHttpProxy.yaml index aceddd3b42da..6291862a1be6 100644 --- a/mmv1/products/compute/go_TargetHttpProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpProxy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetHttpsProxy.yaml b/mmv1/products/compute/go_TargetHttpsProxy.yaml index f37b0918cc8e..c22e7eed42a8 100644 --- a/mmv1/products/compute/go_TargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpsProxy.yaml @@ -34,7 +34,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 @@ -229,5 +229,13 @@ properties: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. 
- resource: 'SslPolicy' + + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. + update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}' + update_verb: 'PATCH' + fingerprint_name: 'fingerprint' + resource: 'ServerTlsPolicy' imports: 'selfLink' diff --git a/mmv1/products/compute/go_TargetInstance.yaml b/mmv1/products/compute/go_TargetInstance.yaml index 3f9b985e344b..085880499322 100644 --- a/mmv1/products/compute/go_TargetInstance.yaml +++ b/mmv1/products/compute/go_TargetInstance.yaml @@ -38,7 +38,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetSslProxy.yaml b/mmv1/products/compute/go_TargetSslProxy.yaml index f77aac55132b..c18954fcb63e 100644 --- a/mmv1/products/compute/go_TargetSslProxy.yaml +++ b/mmv1/products/compute/go_TargetSslProxy.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_TargetTcpProxy.yaml b/mmv1/products/compute/go_TargetTcpProxy.yaml index e135ab5eed35..88ca21a43799 100644 --- a/mmv1/products/compute/go_TargetTcpProxy.yaml +++ b/mmv1/products/compute/go_TargetTcpProxy.yaml @@ -35,7 +35,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_UrlMap.yaml 
b/mmv1/products/compute/go_UrlMap.yaml index c6febafa9843..49ab1fa5a2c1 100644 --- a/mmv1/products/compute/go_UrlMap.yaml +++ b/mmv1/products/compute/go_UrlMap.yaml @@ -32,7 +32,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/global/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_VpnGateway.yaml b/mmv1/products/compute/go_VpnGateway.yaml index 54202d714178..24be342df8cc 100644 --- a/mmv1/products/compute/go_VpnGateway.yaml +++ b/mmv1/products/compute/go_VpnGateway.yaml @@ -36,7 +36,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/compute/go_VpnTunnel.yaml b/mmv1/products/compute/go_VpnTunnel.yaml index 56bb530a4375..116133853a5f 100644 --- a/mmv1/products/compute/go_VpnTunnel.yaml +++ b/mmv1/products/compute/go_VpnTunnel.yaml @@ -33,7 +33,7 @@ async: actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + base_url: '{{op_id}}' kind: 'compute#operation' path: 'name' wait_ms: 1000 diff --git a/mmv1/products/containerattached/go_Cluster.yaml b/mmv1/products/containerattached/go_Cluster.yaml index 239916e6afab..75869c34066d 100644 --- a/mmv1/products/containerattached/go_Cluster.yaml +++ b/mmv1/products/containerattached/go_Cluster.yaml @@ -133,7 +133,8 @@ properties: type: String description: | The Kubernetes distribution of the underlying attached cluster. Supported values: - "eks", "aks". + "eks", "aks", "generic". The generic distribution provides the ability to register + or migrate any CNCF conformant cluster. 
required: true immutable: true - name: 'clusterRegion' diff --git a/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml b/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml index be3934a600a5..8e8d67cc9efc 100644 --- a/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml +++ b/mmv1/products/databasemigrationservice/go_ConnectionProfile.yaml @@ -94,6 +94,21 @@ examples: ignore_read_extra: - 'alloydb.0.settings.0.initial_user.0.password' skip_test: true + - name: 'database_migration_service_connection_profile_existing_mysql' + primary_resource_id: 'existing-mysql' + vars: + destination_csql: 'destination-csql' + destination_cp: 'destination-cp' + - name: 'database_migration_service_connection_profile_existing_postgres' + primary_resource_id: 'existing-psql' + vars: + destination_csql: 'destination-csql' + destination_cp: 'destination-cp' + - name: 'database_migration_service_connection_profile_existing_alloydb' + primary_resource_id: 'existing-alloydb' + vars: + destination_alloydb: 'destination-alloydb' + destination_cp: 'destination-cp' parameters: - name: 'connectionProfileId' type: String @@ -127,7 +142,6 @@ properties: type: KeyValueLabels description: | The resource labels for connection profile to use to annotate any related underlying resources such as Compute Engine VMs. - immutable: false - name: 'state' type: Enum description: | @@ -185,24 +199,29 @@ properties: - name: 'host' type: String description: | - Required. The IP or hostname of the source MySQL database. - required: true + The IP or hostname of the source MySQL database. + required_with: + - 'mysql.0.port' + - 'mysql.0.username' - name: 'port' type: Integer description: | - Required. The network port of the source MySQL database. - required: true + The network port of the source MySQL database. + required_with: + - 'mysql.0.host' + - 'mysql.0.username' - name: 'username' type: String description: | - Required. 
The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - required: true + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + required_with: + - 'mysql.0.host' + - 'mysql.0.port' - name: 'password' type: String description: | - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - required: true immutable: true sensitive: true custom_flatten: 'templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_password.go.tmpl' @@ -267,26 +286,38 @@ properties: - name: 'host' type: String description: | - Required. The IP or hostname of the source MySQL database. - required: true + The IP or hostname of the source MySQL database. + required_with: + - 'postgresql.0.port' + - 'postgresql.0.username' + - 'postgresql.0.password' - name: 'port' type: Integer description: | - Required. The network port of the source MySQL database. - required: true + The network port of the source MySQL database. + required_with: + - 'postgresql.0.host' + - 'postgresql.0.username' + - 'postgresql.0.password' - name: 'username' type: String description: | - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - required: true + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
+ required_with: + - 'postgresql.0.host' + - 'postgresql.0.port' + - 'postgresql.0.password' - name: 'password' type: String description: | - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - required: true immutable: true sensitive: true + required_with: + - 'postgresql.0.host' + - 'postgresql.0.port' + - 'postgresql.0.username' custom_flatten: 'templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_password.go.tmpl' - name: 'passwordSet' type: Boolean @@ -339,6 +370,10 @@ properties: type: String description: | If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + - name: 'alloydbClusterId' + type: String + description: | + If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. - name: 'networkArchitecture' type: Enum description: | diff --git a/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml b/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml index aab6d6f0caa8..b14a6ca551b6 100644 --- a/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml +++ b/mmv1/products/databasemigrationservice/go_PrivateConnection.yaml @@ -49,8 +49,6 @@ examples: vars: private_connection_id: 'my-connection' network_name: 'my-network' - test_vars_overrides: - 'network_name': 'acctest.BootstrapSharedTestNetwork(t, "dbms-privateconnection")' parameters: - name: 'privateConnectionId' type: String @@ -74,7 +72,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. - immutable: false - name: 'displayName' type: String description: Display name. 
diff --git a/mmv1/products/dataform/go_Repository.yaml b/mmv1/products/dataform/go_Repository.yaml index 9525a76fc97c..fa9cb9abfa1e 100644 --- a/mmv1/products/dataform/go_Repository.yaml +++ b/mmv1/products/dataform/go_Repository.yaml @@ -47,6 +47,8 @@ examples: dataform_repository_name: 'dataform_repository' data: 'secret-data' secret_name: 'my-secret' + key_ring_name: 'example-key-ring' + crypto_key_name: 'example-crypto-key-name' skip_test: true - name: 'dataform_repository_with_cloudsource_repo' primary_resource_id: 'dataform_repository' @@ -57,6 +59,8 @@ examples: dataform_repository_name: 'dataform_repository' data: 'secret-data' secret_name: 'my-secret' + key_ring_name: 'example-key-ring' + crypto_key_name: 'example-crypto-key-name' skip_docs: true - name: 'dataform_repository_with_cloudsource_repo_and_ssh' primary_resource_id: 'dataform_repository' @@ -161,10 +165,15 @@ properties: type: String description: Optional. The repository's user-friendly name. min_version: 'beta' + - name: 'kmsKeyName' + type: String + description: | + Optional. The reference to a KMS encryption key. If provided, it will be used to encrypt user data in the repository and all child resources. + It is not possible to add or update the encryption key after the repository is created. Example projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key] + min_version: 'beta' - name: 'labels' type: KeyValueLabels description: | Optional. Repository user labels. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
min_version: 'beta' - immutable: false diff --git a/mmv1/products/datafusion/go_Instance.yaml b/mmv1/products/datafusion/go_Instance.yaml index a4db1f7fef0f..8b442a23bb01 100644 --- a/mmv1/products/datafusion/go_Instance.yaml +++ b/mmv1/products/datafusion/go_Instance.yaml @@ -30,6 +30,7 @@ timeouts: delete_minutes: 50 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' @@ -44,6 +45,7 @@ async: iam_policy: method_name_separator: ':' parent_resource_attribute: 'name' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' import_format: - 'projects/{{project}}/locations/{{location}}/instances/{{name}}' - '{{name}}' @@ -157,7 +159,6 @@ properties: description: | The resource labels for instance to use to annotate any related underlying resources, such as Compute Engine VMs. - immutable: false - name: 'options' type: KeyValuePairs description: | diff --git a/mmv1/products/dataplex/go_AspectType.yaml b/mmv1/products/dataplex/go_AspectType.yaml index a5f93176cba3..942121ef6264 100644 --- a/mmv1/products/dataplex/go_AspectType.yaml +++ b/mmv1/products/dataplex/go_AspectType.yaml @@ -116,7 +116,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the AspectType. - immutable: false - name: 'metadataTemplate' type: String description: | diff --git a/mmv1/products/dataplex/go_Datascan.yaml b/mmv1/products/dataplex/go_Datascan.yaml index 04e2f29b6bc9..2277fd3499b5 100644 --- a/mmv1/products/dataplex/go_Datascan.yaml +++ b/mmv1/products/dataplex/go_Datascan.yaml @@ -16,6 +16,7 @@ name: 'Datascan' description: | Represents a user-visible job which provides the insights for the related data source. +skip_attribution_label: true references: guides: 'Official Documentation': 'https://cloud.google.com/dataplex/docs' @@ -126,7 +127,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the scan. A list of key->value pairs. 
- immutable: false - name: 'state' type: Enum description: | diff --git a/mmv1/products/dataplex/go_EntryGroup.yaml b/mmv1/products/dataplex/go_EntryGroup.yaml index 277c3c61a44c..9669bafe26af 100644 --- a/mmv1/products/dataplex/go_EntryGroup.yaml +++ b/mmv1/products/dataplex/go_EntryGroup.yaml @@ -116,7 +116,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the EntryGroup. - immutable: false - name: 'transferStatus' type: Enum description: | diff --git a/mmv1/products/dataplex/go_EntryType.yaml b/mmv1/products/dataplex/go_EntryType.yaml index ec0e7fdec87d..5af9b29a45e6 100644 --- a/mmv1/products/dataplex/go_EntryType.yaml +++ b/mmv1/products/dataplex/go_EntryType.yaml @@ -116,7 +116,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the EntryType. - immutable: false - name: 'typeAliases' type: Array description: | diff --git a/mmv1/products/dataplex/go_Task.yaml b/mmv1/products/dataplex/go_Task.yaml index 31b40da3cf09..f45b8d9f4cb4 100644 --- a/mmv1/products/dataplex/go_Task.yaml +++ b/mmv1/products/dataplex/go_Task.yaml @@ -16,6 +16,7 @@ name: 'Task' description: | A Dataplex task represents the work that you want Dataplex to do on a schedule. It encapsulates code, parameters, and the schedule. +skip_attribution_label: true references: guides: 'Official Documentation': 'https://cloud.google.com/dataplex/docs' @@ -139,7 +140,6 @@ properties: type: KeyValueLabels description: | User-defined labels for the task. 
- immutable: false - name: 'triggerSpec' type: NestedObject description: | diff --git a/mmv1/products/datastream/Stream.yaml b/mmv1/products/datastream/Stream.yaml index 28bac8b38ec5..419080af8ed7 100644 --- a/mmv1/products/datastream/Stream.yaml +++ b/mmv1/products/datastream/Stream.yaml @@ -1020,8 +1020,6 @@ properties: name: 'transactionLogs' allow_empty_object: true send_empty_value: true - conflicts: - - source_config.0.sql_server_source_config.change_tables description: | CDC reader reads from transaction logs. properties: [] @@ -1029,8 +1027,6 @@ properties: name: 'changeTables' allow_empty_object: true send_empty_value: true - conflicts: - - source_config.0.sql_server_source_config.transaction_logs description: | CDC reader reads from change tables. properties: [] diff --git a/mmv1/products/datastream/go_ConnectionProfile.yaml b/mmv1/products/datastream/go_ConnectionProfile.yaml index 20ac6ee464ea..1a750190635f 100644 --- a/mmv1/products/datastream/go_ConnectionProfile.yaml +++ b/mmv1/products/datastream/go_ConnectionProfile.yaml @@ -123,7 +123,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. - immutable: false - name: 'displayName' type: String description: Display name. diff --git a/mmv1/products/datastream/go_PrivateConnection.yaml b/mmv1/products/datastream/go_PrivateConnection.yaml index af4658f81f78..7410dc1d50f2 100644 --- a/mmv1/products/datastream/go_PrivateConnection.yaml +++ b/mmv1/products/datastream/go_PrivateConnection.yaml @@ -48,6 +48,7 @@ async: custom_code: constants: 'templates/terraform/constants/go/private_connection.go.tmpl' post_create: 'templates/terraform/post_create/go/private_connection.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/private_connection.go.tmpl' post_import: 'templates/terraform/post_import/go/private_connection.go.tmpl' skip_sweeper: true schema_version: 1 @@ -89,7 +90,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. 
- immutable: false - name: 'displayName' type: String description: Display name. diff --git a/mmv1/products/datastream/go_Stream.yaml b/mmv1/products/datastream/go_Stream.yaml index 94c13a5cd173..addcf7da8bcb 100644 --- a/mmv1/products/datastream/go_Stream.yaml +++ b/mmv1/products/datastream/go_Stream.yaml @@ -118,6 +118,21 @@ examples: test_vars_overrides: 'deletion_protection': 'false' skip_test: true + - name: 'datastream_stream_sql_server_change_tables' + primary_resource_id: 'default' + vars: + database_name: 'db' + database_password: 'password' + database_user: 'user' + deletion_protection: 'true' + destination_connection_profile_id: 'destination-profile' + source_connection_profile_id: 'source-profile' + sql_server_name: 'sql-server' + sql_server_root_password: 'root-password' + stream_id: 'stream' + test_vars_overrides: + 'deletion_protection': 'false' + skip_test: true - name: 'datastream_stream_postgresql_bigquery_dataset_id' primary_resource_id: 'default' vars: @@ -197,7 +212,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Labels. - immutable: false - name: 'displayName' type: String description: Display name. @@ -1018,6 +1032,22 @@ properties: send_empty_value: true validation: function: 'validation.IntAtLeast(0)' + - name: 'transactionLogs' + type: NestedObject + description: | + CDC reader reads from transaction logs. + send_empty_value: true + allow_empty_object: true + properties: + [] + - name: 'changeTables' + type: NestedObject + description: | + CDC reader reads from change tables. 
+ send_empty_value: true + allow_empty_object: true + properties: + [] - name: 'destinationConfig' type: NestedObject description: | @@ -1160,9 +1190,8 @@ properties: immutable: true send_empty_value: true allow_empty_object: true - exactly_one_of: - - 'destination_config.0.bigquery_destination_config.0.merge' - - 'destination_config.0.bigquery_destination_config.0.append_only' + conflicts: + - destination_config.0.bigquery_destination_config.0.append_only properties: [] - name: 'appendOnly' @@ -1174,9 +1203,8 @@ properties: immutable: true send_empty_value: true allow_empty_object: true - exactly_one_of: - - 'destination_config.0.bigquery_destination_config.0.merge' - - 'destination_config.0.bigquery_destination_config.0.append_only' + conflicts: + - destination_config.0.bigquery_destination_config.0.merge properties: [] - name: 'state' diff --git a/mmv1/products/dialogflowcx/go_Intent.yaml b/mmv1/products/dialogflowcx/go_Intent.yaml index 7d027647d9e3..0d8b1dc7314f 100644 --- a/mmv1/products/dialogflowcx/go_Intent.yaml +++ b/mmv1/products/dialogflowcx/go_Intent.yaml @@ -184,7 +184,6 @@ properties: The key/value metadata to label an intent. Labels can contain lowercase letters, digits and the symbols '-' and '_'. International characters are allowed, including letters from unicase alphabets. Keys must start with a letter. Keys and values can be no longer than 63 characters and no more than 128 bytes. Prefix "sys-" is reserved for Dialogflow defined labels. Currently allowed Dialogflow defined labels include: * sys-head * sys-contextual The above labels do not require value. "sys-head" means the intent is a head intent. "sys.contextual" means the intent is a contextual intent. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
- immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/discoveryengine/go_DataStore.yaml b/mmv1/products/discoveryengine/go_DataStore.yaml index a999bd7f9f55..bfa83611338e 100644 --- a/mmv1/products/discoveryengine/go_DataStore.yaml +++ b/mmv1/products/discoveryengine/go_DataStore.yaml @@ -25,34 +25,24 @@ references: docs: base_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores' self_link: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}' -create_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores?dataStoreId={{data_store_id}}&createAdvancedSiteSearch={{create_advanced_site_search}}' +create_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores?dataStoreId={{data_store_id}}&createAdvancedSiteSearch={{create_advanced_site_search}}&skipDefaultSchemaCreation={{skip_default_schema_creation}}' update_verb: 'PATCH' update_mask: true delete_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}' import_format: - 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}' timeouts: - insert_minutes: 60 - update_minutes: 60 - delete_minutes: 60 + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 autogen_async: true async: - actions: ['create', 'delete', 'update'] + actions: ['create', 'delete'] type: 'OpAsync' operation: base_url: '{{op_id}}' - path: 'name' - wait_ms: 1000 - timeouts: - insert_minutes: 60 - update_minutes: 60 - delete_minutes: 60 result: - path: 'response' resource_inside_response: false - error: - path: 'error' - message: 'message' custom_code: examples: - name: 'discoveryengine_datastore_basic' @@ -71,6 +61,12 @@ examples: vars: data_store_id: 'data-store-id' skip_docs: true + - name: 
'discoveryengine_datastore_document_processing_config_layout' + primary_resource_id: 'document_processing_config_layout' + primary_resource_name: 'fmt.Sprintf("tf_test_data_store%s", context["random_suffix"])' + vars: + data_store_id: 'data-store-id' + skip_docs: true parameters: - name: 'location' type: String @@ -93,6 +89,18 @@ parameters: If true, an advanced data store for site search will be created. If the data store is not configured as site search (GENERIC vertical and PUBLIC_WEBSITE contentConfig), this flag will be ignored. + url_param_only: true + default_value: false + - name: 'skipDefaultSchemaCreation' + type: Boolean + description: | + A boolean flag indicating whether to skip the default schema creation for + the data store. Only enable this flag if you are certain that the default + schema is incompatible with your use case. + If set to true, you must manually create a schema for the data store + before any documents can be ingested. + This flag cannot be specified if `data_store.starting_schema` is + specified. url_param_only: true default_value: false @@ -120,6 +128,7 @@ properties: enum_values: - 'GENERIC' - 'MEDIA' + - 'HEALTHCARE_FHIR' - name: 'solutionTypes' type: Array description: | @@ -133,6 +142,7 @@ properties: - 'SOLUTION_TYPE_RECOMMENDATION' - 'SOLUTION_TYPE_SEARCH' - 'SOLUTION_TYPE_CHAT' + - 'SOLUTION_TYPE_GENERATIVE_CHAT' - name: 'defaultSchemaId' type: String description: | @@ -162,6 +172,33 @@ properties: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/documentProcessingConfig`. required: false output: true + - name: 'chunkingConfig' + type: NestedObject + description: | + Whether chunking mode is enabled. + required: false + properties: + - name: 'layoutBasedChunkingConfig' + type: NestedObject + description: | + Configuration for the layout based chunking. 
+ required: false + send_empty_value: true + allow_empty_object: true + properties: + - name: 'chunkSize' + type: Integer + description: | + The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + required: false + - name: 'includeAncestorHeadings' + type: Boolean + description: | + Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. + + required: false - name: 'defaultParsingConfig' type: NestedObject description: | @@ -180,6 +217,7 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: [] - name: 'ocrParsingConfig' @@ -190,13 +228,26 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: - name: 'useNativeText' type: Boolean description: | If true, will use native text instead of OCR text on pages containing native text. - required: false + - name: 'layoutParsingConfig' + type: NestedObject + description: | + Configurations applied to layout parser. + required: false + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'default_parsing_config.0.digital_parsing_config' + - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' + properties: + [] - name: 'parsingConfigOverrides' type: Map description: | @@ -206,6 +257,7 @@ properties: * `docx`: Override parsing config for DOCX files, only digital parsing and or layout parsing are supported. 
key_name: 'file_type' value_type: + name: parsingConfigOverrides type: NestedObject properties: - name: 'digitalParsingConfig' @@ -218,6 +270,7 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: [] - name: 'ocrParsingConfig' @@ -228,13 +281,26 @@ properties: exactly_one_of: - 'default_parsing_config.0.digital_parsing_config' - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' properties: - name: 'useNativeText' type: Boolean description: | If true, will use native text instead of OCR text on pages containing native text. - required: false + - name: 'layoutParsingConfig' + type: NestedObject + description: | + Configurations applied to layout parser. + required: false + send_empty_value: true + allow_empty_object: true + exactly_one_of: + - 'default_parsing_config.0.digital_parsing_config' + - 'default_parsing_config.0.ocr_parsing_config' + - 'default_parsing_config.0.layout_parsing_config' + properties: + [] - name: 'createTime' type: Time description: | diff --git a/mmv1/products/discoveryengine/go_Schema.yaml b/mmv1/products/discoveryengine/go_Schema.yaml new file mode 100644 index 000000000000..3ec0b6d8e311 --- /dev/null +++ b/mmv1/products/discoveryengine/go_Schema.yaml @@ -0,0 +1,103 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Schema' +description: | + Schema defines the structure and layout of a type of document data. +references: + guides: + 'Provide a schema for your data store': 'https://cloud.google.com/generative-ai-app-builder/docs/provide-schema' + api: 'https://cloud.google.com/generative-ai-app-builder/docs/reference/rest/v1/projects.locations.collections.dataStores.schemas' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas' +self_link: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas?schemaId={{schema_id}}' +delete_url: 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}' +immutable: true +import_format: + - 'projects/{{project}}/locations/{{location}}/collections/default_collection/dataStores/{{data_store_id}}/schemas/{{schema_id}}' +timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'discoveryengine_schema_basic' + primary_resource_id: 'basic' + vars: + data_store_id: 'data-store-id' + schema_id: 'schema-id' +parameters: + - name: 'location' + type: String + description: | + The geographic location where the data store should reside. The value can + only be one of "global", "us" and "eu". 
+ url_param_only: true + required: true + immutable: true + - name: 'dataStoreId' + type: String + description: | + The unique id of the data store. + url_param_only: true + required: true + immutable: true + - name: 'schemaId' + type: String + description: | + The unique id of the schema. + + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The unique full resource name of the schema. Values are of the format + `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/schemas/{schema_id}`. + This field must be a UTF-8 encoded string with a length limit of 1024 + characters. + output: true + - name: 'jsonSchema' + type: String + description: | + The JSON representation of the schema. + immutable: true + exactly_one_of: + - 'json_schema' + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl' + validation: + function: 'validation.StringIsJSON' diff --git a/mmv1/products/dlp/go_DiscoveryConfig.yaml b/mmv1/products/dlp/go_DiscoveryConfig.yaml index ff7fd3f887e3..a4be9b81b1f1 100644 --- a/mmv1/products/dlp/go_DiscoveryConfig.yaml +++ b/mmv1/products/dlp/go_DiscoveryConfig.yaml @@ -32,12 +32,12 @@ timeouts: insert_minutes: 20 update_minutes: 20 delete_minutes: 20 -skip_sweeper: true custom_code: encoder: 'templates/terraform/encoders/go/wrap_object.go.tmpl' update_encoder: 'templates/terraform/encoders/go/wrap_object.go.tmpl' decoder: 'templates/terraform/decoders/go/unwrap_resource.go.tmpl' custom_import: 'templates/terraform/custom_import/go/dlp_import.go.tmpl' +skip_sweeper: true examples: - name: 'dlp_discovery_config_basic' primary_resource_id: 'basic' @@ -205,6 +205,50 @@ properties: enum_values: - 'TABLE_PROFILE' - 'RESOURCE_NAME' + - name: 'tagResources' + type: NestedObject + description: Publish a message into 
the Pub/Sub topic. + properties: + - name: 'tagConditions' + type: Array + description: The tags to associate with different conditions. + item_type: + type: NestedObject + properties: + - name: 'tag' + type: NestedObject + description: The tag value to attach to resources. + properties: + - name: 'namespacedValue' + type: String + description: The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + - name: 'sensitivityScore' + type: NestedObject + description: Conditions attaching the tag to a resource on its profile having this sensitivity score. + properties: + - name: 'score' + type: Enum + description: | + The sensitivity score applied to the resource. + required: true + enum_values: + - 'SENSITIVITY_LOW' + - 'SENSITIVITY_MODERATE' + - 'SENSITIVITY_HIGH' + - name: 'profileGenerationsToTag' + type: Array + description: The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. + enum_values: + - 'PROFILE_GENERATION_NEW' + - 'PROFILE_GENERATION_UPDATE' + - name: 'lowerDataRiskToLow' + type: Boolean + description: Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. 
This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. - name: 'targets' type: Array description: Target to match against for determining what to scan and how frequently @@ -346,6 +390,17 @@ properties: - 'UPDATE_FREQUENCY_NEVER' - 'UPDATE_FREQUENCY_DAILY' - 'UPDATE_FREQUENCY_MONTHLY' + - name: 'inspectTemplateModifiedCadence' + type: NestedObject + description: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + properties: + - name: 'frequency' + type: Enum + description: How frequently data profiles can be updated when the template is modified. Defaults to never. + enum_values: + - 'UPDATE_FREQUENCY_NEVER' + - 'UPDATE_FREQUENCY_DAILY' + - 'UPDATE_FREQUENCY_MONTHLY' - name: 'disabled' type: NestedObject description: 'Tables that match this filter will not have profiles created.' @@ -475,6 +530,18 @@ properties: - 'UPDATE_FREQUENCY_NEVER' - 'UPDATE_FREQUENCY_DAILY' - 'UPDATE_FREQUENCY_MONTHLY' + - name: 'inspectTemplateModifiedCadence' + type: NestedObject + description: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + properties: + - name: 'frequency' + type: Enum + description: How frequently data profiles can be updated when the template is modified. Defaults to never. + required: true + enum_values: + - 'UPDATE_FREQUENCY_NEVER' + - 'UPDATE_FREQUENCY_DAILY' + - 'UPDATE_FREQUENCY_MONTHLY' - name: 'disabled' type: NestedObject description: 'Disable profiling for database resources that match this filter.' 
diff --git a/mmv1/products/dns/go_ManagedZone.yaml b/mmv1/products/dns/go_ManagedZone.yaml new file mode 100644 index 000000000000..57f23525eb92 --- /dev/null +++ b/mmv1/products/dns/go_ManagedZone.yaml @@ -0,0 +1,429 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ManagedZone' +kind: 'dns#managedZone' +description: | + A zone is a subtree of the DNS namespace under one administrative + responsibility. A ManagedZone is a resource that represents a DNS zone + hosted by the Cloud DNS service. 
+references: + guides: + 'Managing Zones': 'https://cloud.google.com/dns/zones/' + api: 'https://cloud.google.com/dns/api/v1/managedZones' +docs: +id_format: 'projects/{{project}}/managedZones/{{name}}' +base_url: 'projects/{{project}}/managedZones' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + fetch_iam_policy_verb: 'POST' + parent_resource_attribute: 'managed_zone' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/managedZones/{{managed_zone}}' + - '{{project}}/{{managed_zone}}' +custom_code: + update_encoder: 'templates/terraform/update_encoder/go/managed_dns_zone.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/managed_dns_zone.go.tmpl' +examples: + - name: 'dns_managed_zone_quickstart' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-example-zone-googlecloudexample%s", context["random_suffix"])' + vars: + dns_compute_instance: 'dns-compute-instance' + allow_http_traffic: 'allow-http-traffic' + example_zone_googlecloudexample: 'example-zone-googlecloudexample' + dns_name: 'googlecloudexample.net.' 
+ test_vars_overrides: + 'dns_name': '"m-z.gcp.tfacc.hashicorptest.com."' + ignore_read_extra: + - 'force_destroy' + skip_docs: true + - name: 'dns_record_set_basic' + primary_resource_id: 'parent-zone' + vars: + sample_zone: 'sample-zone' + skip_docs: true + - name: 'dns_managed_zone_basic' + primary_resource_id: 'example-zone' + external_providers: ["random", "time"] + skip_vcr: true + - name: 'dns_managed_zone_private' + primary_resource_id: 'private-zone' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + network_2_name: 'network-2' + - name: 'dns_managed_zone_private_multiproject' + primary_resource_id: 'private-zone' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + network_2_name: 'network-2' + project_1_name: 'project-1' + project_2_name: 'project-2' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + - name: 'dns_managed_zone_private_forwarding' + primary_resource_id: 'private-zone' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + network_2_name: 'network-2' + skip_test: true + - name: 'dns_managed_zone_private_gke' + primary_resource_id: 'private-zone-gke' + vars: + zone_name: 'private-zone' + network_1_name: 'network-1' + cluster_1_name: 'cluster-1' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' + - name: 'dns_managed_zone_private_peering' + primary_resource_id: 'peering-zone' + vars: + zone_name: 'peering-zone' + network_source_name: 'network-source' + network_target_name: 'network-target' + - name: 'dns_managed_zone_service_directory' + primary_resource_id: 'sd-zone' + min_version: 'beta' + vars: + zone_name: 'peering-zone' + network_name: 'network' + - name: 'dns_managed_zone_cloud_logging' + primary_resource_id: 'cloud-logging-enabled-zone' + vars: + zone_name: 'cloud-logging-enabled-zone' +virtual_fields: + - name: 'force_destroy' + description: 'Set this true to delete all records in the zone.' 
+ type: Boolean + default_value: false +parameters: +properties: + - name: 'description' + type: String + description: | + A textual description field. Defaults to 'Managed by Terraform'. + required: false + validation: + function: 'validation.StringIsNotEmpty' + default_value: "Managed by Terraform" + - name: 'dnsName' + type: String + description: | + The DNS name of this managed zone, for instance "example.com.". + required: true + immutable: true + - name: 'dnssecConfig' + type: NestedObject + description: DNSSEC configuration + properties: + - name: 'kind' + type: String + description: Identifies what kind of resource this is + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + default_value: "dns#managedZoneDnsSecConfig" + - name: 'nonExistence' + type: Enum + description: | + Specifies the mechanism used to provide authenticated denial-of-existence responses. + non_existence can only be updated when the state is `off`. + default_from_api: true + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + enum_values: + - 'nsec' + - 'nsec3' + - name: 'state' + type: Enum + description: Specifies whether DNSSEC is enabled, and what mode it is in + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + enum_values: + - 'off' + - 'on' + - 'transfer' + - name: 'defaultKeySpecs' + type: Array + description: | + Specifies parameters that will be used for generating initial DnsKeys + for this ManagedZone. If you provide a spec for keySigning or zoneSigning, + you must also provide one for the other. + default_key_specs can only be updated when the state is `off`. 
+ default_from_api: true + at_least_one_of: + - 'dnssec_config.0.kind' + - 'dnssec_config.0.non_existence' + - 'dnssec_config.0.state' + - 'dnssec_config.0.default_key_specs' + item_type: + type: NestedObject + properties: + - name: 'algorithm' + type: Enum + description: + String mnemonic specifying the DNSSEC algorithm of this key + enum_values: + - 'ecdsap256sha256' + - 'ecdsap384sha384' + - 'rsasha1' + - 'rsasha256' + - 'rsasha512' + - name: 'keyLength' + type: Integer + description: Length of the keys in bits + - name: 'keyType' + type: Enum + description: | + Specifies whether this is a key signing key (KSK) or a zone + signing key (ZSK). Key signing keys have the Secure Entry + Point flag set and, when active, will only be used to sign + resource record sets of type DNSKEY. Zone signing keys do + not have the Secure Entry Point flag set and will be used + to sign all other types of resource record sets. + enum_values: + - 'keySigning' + - 'zoneSigning' + - name: 'kind' + type: String + description: 'Identifies what kind of resource this is' + default_value: "dns#dnsKeySpec" + - name: 'managedZoneID' + type: Integer + description: Unique identifier for the resource; defined by the server. + api_name: id + output: true + - name: 'name' + type: String + description: | + User assigned name for this resource. + Must be unique within the project. + required: true + immutable: true + - name: 'nameServers' + type: Array + description: | + Delegate your managed_zone to these virtual name servers; + defined by the server + output: true + item_type: + type: String + - name: 'creationTime' + type: Time + description: | + The time that this resource was created on the server. + This is in RFC3339 text format. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + A set of key/value label pairs to assign to this ManagedZone. 
+ - name: 'visibility' + type: Enum + description: | + The zone's visibility: public zones are exposed to the Internet, + while private zones are visible only to Virtual Private Cloud resources. + immutable: true + diff_suppress_func: 'tpgresource.CaseDiffSuppress' + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: "public" + enum_values: + - 'private' + - 'public' + - name: 'privateVisibilityConfig' + type: NestedObject + description: | + For privately visible zones, the set of Virtual Private Cloud + resources that the zone is visible from. At least one of `gke_clusters` or `networks` must be specified. + send_empty_value: true + at_least_one_of: + - 'gke_clusters' + - 'networks' + custom_expand: 'templates/terraform/custom_expand/go/dns_managed_zone_private_visibility_config.go.tmpl' + properties: + - name: 'gkeClusters' + type: Array + description: + 'The list of Google Kubernetes Engine clusters that can see this zone.' + item_type: + type: NestedObject + properties: + - name: 'gkeClusterName' + type: String + description: | + The resource name of the cluster to bind this ManagedZone to. + This should be specified in the format like + `projects/*/locations/*/clusters/*` + required: true + - name: 'networks' + type: Array + description: | + The list of VPC networks that can see this zone. Until the provider updates to use the Terraform 0.12 SDK in a future release, you + may experience issues with this resource while updating. If you've defined a `networks` block and + add another `networks` block while keeping the old block, Terraform will see an incorrect diff + and apply an incorrect update to the resource. If you encounter this issue, remove all `networks` + blocks in an update and then apply another update adding all of them back simultaneously. 
+ is_set: true + set_hash_func: |- + func(v interface{}) int { + if v == nil { + return 0 + } + raw := v.(map[string]interface{}) + if url, ok := raw["network_url"]; ok { + return tpgresource.SelfLinkRelativePathHash(url) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsManagedZonePrivateVisibilityConfigNetworksSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'networkUrl' + type: String + description: | + The id or fully qualified URL of the VPC network to bind to. + This should be formatted like `projects/{project}/global/networks/{network}` or + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'forwardingConfig' + type: NestedObject + description: | + The presence for this field indicates that outbound forwarding is enabled + for this zone. The value of this field contains the set of destinations + to forward to. + properties: + - name: 'targetNameServers' + type: Array + description: | + List of target name servers to forward to. Cloud DNS will + select the best available name server if more than + one target is given. + is_set: true + required: true + set_hash_func: |- + func(v interface{}) int { + raw := v.(map[string]interface{}) + if address, ok := raw["ipv4_address"]; ok { + tpgresource.Hashcode(address.(string)) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsManagedZoneForwardingConfigTargetNameServersSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'ipv4Address' + type: String + description: 'IPv4 address of a target name server.' + required: true + - name: 'forwardingPath' + type: Enum + description: | + Forwarding path for this TargetNameServer. If unset or `default` Cloud DNS will make forwarding + decision based on address ranges, i.e. 
RFC1918 addresses go to the VPC, Non-RFC1918 addresses go + to the Internet. When set to `private`, Cloud DNS will always send queries through VPC for this target + enum_values: + - 'default' + - 'private' + - name: 'peeringConfig' + type: NestedObject + description: | + The presence of this field indicates that DNS Peering is enabled for this + zone. The value of this field contains the network to peer with. + properties: + - name: 'targetNetwork' + type: NestedObject + description: 'The network with which to peer.' + required: true + properties: + - name: 'networkUrl' + type: String + description: | + The id or fully qualified URL of the VPC network to forward queries to. + This should be formatted like `projects/{project}/global/networks/{network}` or + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/network_full_url.tmpl' + - name: 'reverseLookup' + type: Boolean + description: | + Specifies if this is a managed reverse lookup zone. If true, Cloud DNS will resolve reverse + lookup queries using automatically configured records for VPC resources. This only applies + to networks listed under `private_visibility_config`. + api_name: reverseLookupConfig + min_version: 'beta' + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/object_to_bool.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bool_to_object.go.tmpl' + - name: 'serviceDirectoryConfig' + type: NestedObject + description: + The presence of this field indicates that this zone is backed by Service + Directory. The value of this field contains information related to the + namespace associated with the zone. + min_version: 'beta' + immutable: true + properties: + - name: 'namespace' + type: NestedObject + description: 'The namespace associated with the zone.' 
+ required: true + properties: + - name: 'namespaceUrl' + type: String + description: | + The fully qualified or partial URL of the service directory namespace that should be + associated with the zone. This should be formatted like + `https://servicedirectory.googleapis.com/v1/projects/{project}/locations/{location}/namespaces/{namespace_id}` + or simply `projects/{project}/locations/{location}/namespaces/{namespace_id}` + Ignored for `public` visibility zones. + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/full_to_relative_path.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/sd_full_url.tmpl' + - name: 'cloudLoggingConfig' + type: NestedObject + description: 'Cloud logging configuration' + default_from_api: true + properties: + - name: 'enableLogging' + type: Boolean + description: + 'If set, enable query logging for this ManagedZone. False by default, + making logging opt-in.' + required: true diff --git a/mmv1/products/dns/go_Policy.yaml b/mmv1/products/dns/go_Policy.yaml new file mode 100644 index 000000000000..54263175e605 --- /dev/null +++ b/mmv1/products/dns/go_Policy.yaml @@ -0,0 +1,159 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Policy' +kind: 'dns#policy' +description: | + A policy is a collection of DNS rules applied to one or more Virtual + Private Cloud resources. 
+references: + guides: + 'Using DNS server policies': 'https://cloud.google.com/dns/zones/#using-dns-server-policies' + api: 'https://cloud.google.com/dns/docs/reference/v1beta2/policies' +docs: +id_format: 'projects/{{project}}/policies/{{name}}' +base_url: 'projects/{{project}}/policies' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + pre_delete: 'templates/terraform/pre_delete/go/detach_network.tmpl' +examples: + - name: 'dns_policy_basic' + primary_resource_id: 'example-policy' + vars: + policy_name: 'example-policy' + network_1_name: 'network-1' + network_2_name: 'network-2' + - name: 'dns_policy_multiproject' + primary_resource_id: 'example-policy-multiproject' + vars: + policy_name: 'example-policy-multiproject' + network_1_name: 'network-1' + network_2_name: 'network-2' + project_1_name: 'project-1' + project_2_name: 'project-2' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true +parameters: +properties: + - name: 'alternativeNameServerConfig' + type: NestedObject + description: | + Sets an alternative name server for the associated networks. + When specified, all DNS queries are forwarded to a name server that you choose. + Names such as .internal are not available when an alternative name server is specified. + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + properties: + - name: 'targetNameServers' + type: Array + description: | + Sets an alternative name server for the associated networks. When specified, + all DNS queries are forwarded to a name server that you choose. Names such as .internal + are not available when an alternative name server is specified. 
+ is_set: true + required: true + set_hash_func: |- + func(v interface{}) int { + raw := v.(map[string]interface{}) + if address, ok := raw["ipv4_address"]; ok { + tpgresource.Hashcode(address.(string)) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'ipv4Address' + type: String + description: 'IPv4 address to forward to.' + required: true + - name: 'forwardingPath' + type: Enum + description: | + Forwarding path for this TargetNameServer. If unset or `default` Cloud DNS will make forwarding + decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go + to the Internet. When set to `private`, Cloud DNS will always send queries through VPC for this target + enum_values: + - 'default' + - 'private' + - name: 'description' + type: String + description: | + A textual description field. Defaults to 'Managed by Terraform'. + required: false + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + default_value: "Managed by Terraform" + - name: 'enableInboundForwarding' + type: Boolean + description: | + Allows networks bound to this policy to receive DNS queries sent + by VMs or applications over VPN connections. When enabled, a + virtual IP address will be allocated from each of the sub-networks + that are bound to this policy. + send_empty_value: true + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + - name: 'enableLogging' + type: Boolean + description: | + Controls whether logging is enabled for the networks bound to this policy. + Defaults to no logging if not set. + send_empty_value: true + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + - name: 'name' + type: String + description: | + User assigned name for this policy. 
+ required: true + - name: 'networks' + type: Array + description: + 'List of network names specifying networks to which this policy is + applied.' + is_set: true + update_url: 'projects/{{project}}/policies/{{name}}' + update_verb: 'PATCH' + set_hash_func: |- + func(v interface{}) int { + raw := v.(map[string]interface{}) + if url, ok := raw["network_url"]; ok { + return tpgresource.SelfLinkRelativePathHash(url) + } + var buf bytes.Buffer + schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) + return tpgresource.Hashcode(buf.String()) + } + item_type: + type: NestedObject + properties: + - name: 'networkUrl' + type: String + description: | + The id or fully qualified URL of the VPC network to forward queries to. + This should be formatted like `projects/{project}/global/networks/{network}` or + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/network_full_url.tmpl' diff --git a/mmv1/products/dns/go_ResponsePolicy.yaml b/mmv1/products/dns/go_ResponsePolicy.yaml new file mode 100644 index 000000000000..c9ad2091c2d3 --- /dev/null +++ b/mmv1/products/dns/go_ResponsePolicy.yaml @@ -0,0 +1,90 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ResponsePolicy' +kind: 'dns#responsePolicy' +description: | + A Response Policy is a collection of selectors that apply to queries + made against one or more Virtual Private Cloud networks. +docs: +base_url: 'projects/{{project}}/responsePolicies' +self_link: 'projects/{{project}}/responsePolicies/{{response_policy_name}}' +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - responsePolicyName +custom_code: + pre_delete: 'templates/terraform/pre_delete/go/response_policy_detach_network_gke.tmpl' +examples: + - name: 'dns_response_policy_basic' + primary_resource_id: 'example-response-policy' + vars: + response_policy_name: 'example-response-policy' + network_1_name: 'network-1' + network_2_name: 'network-2' + cluster_1_name: 'cluster-1' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' +parameters: +properties: + - name: 'responsePolicyName' + type: String + description: + The user assigned name for this Response Policy, such as + `myresponsepolicy`. + required: true + immutable: true + - name: 'description' + type: String + description: | + The description of the response policy, such as `My new response policy`. + required: false + default_value: "Managed by Terraform" + - name: 'networks' + type: Array + description: + 'The list of network names specifying networks to which this policy is + applied.' + send_empty_value: true + item_type: + type: NestedObject + properties: + - name: 'networkUrl' + type: String + description: | + The fully qualified URL of the VPC network to bind to. 
+ This should be formatted like + `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/network_full_url.tmpl' + - name: 'gkeClusters' + type: Array + description: + 'The list of Google Kubernetes Engine clusters that can see this zone.' + item_type: + type: NestedObject + properties: + - name: 'gkeClusterName' + type: String + description: | + The resource name of the cluster to bind this ManagedZone to. + This should be specified in the format like + `projects/*/locations/*/clusters/*` + required: true diff --git a/mmv1/products/dns/go_ResponsePolicyRule.yaml b/mmv1/products/dns/go_ResponsePolicyRule.yaml new file mode 100644 index 000000000000..dcfa029cabba --- /dev/null +++ b/mmv1/products/dns/go_ResponsePolicyRule.yaml @@ -0,0 +1,131 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ResponsePolicyRule' +kind: 'dns#responsePolicyRule' +description: | + A Response Policy Rule is a selector that applies its behavior to queries that match the selector. + Selectors are DNS names, which may be wildcards or exact matches. + Each DNS query subject to a Response Policy matches at most one ResponsePolicyRule, + as identified by the dns_name field with the longest matching suffix. 
+docs: +id_format: 'projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}' +base_url: 'projects/{{project}}/responsePolicies/{{response_policy}}/rules' +self_link: 'projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}' +update_verb: 'PATCH' +import_format: + - 'projects/{{project}}/responsePolicies/{{response_policy}}/rules/{{rule_name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - ruleName +custom_code: +examples: + - name: 'dns_response_policy_rule_basic' + primary_resource_id: 'example-response-policy-rule' + vars: + response_policy_name: 'example-response-policy' + network_1_name: 'network-1' + network_2_name: 'network-2' + cluster_1_name: 'cluster-1' + response_policy_rule_name: 'example-rule' +parameters: + - name: 'response_policy' + type: ResourceRef + description: | + Identifies the response policy addressed by this request. + url_param_only: true + required: true + resource: 'ResponsePolicy' + imports: 'responsePolicyName' +properties: + - name: 'ruleName' + type: String + description: + An identifier for this rule. Must be unique with the ResponsePolicy. + required: true + immutable: true + - name: 'dnsName' + type: String + description: + The DNS name (wildcard or exact) to apply this rule to. Must be unique + within the Response Policy Rule. + required: true + - name: 'localData' + type: NestedObject + description: | + Answer this query directly with DNS data. These ResourceRecordSets override any other DNS behavior for the matched name; + in particular they override private zones, the public internet, and GCP internal DNS. No SOA nor NS types are allowed. + conflicts: + - behavior + properties: + - name: 'localDatas' + type: Array + description: + All resource record sets for this selector, one per resource record + type. The name must match the dns_name. 
+ required: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: For example, www.example.com. + required: true + - name: 'type' + type: Enum + description: One of valid DNS resource types. + required: true + enum_values: + - 'A' + - 'AAAA' + - 'CAA' + - 'CNAME' + - 'DNSKEY' + - 'DS' + - 'HTTPS' + - 'IPSECVPNKEY' + - 'MX' + - 'NAPTR' + - 'NS' + - 'PTR' + - 'SOA' + - 'SPF' + - 'SRV' + - 'SSHFP' + - 'SVCB' + - 'TLSA' + - 'TXT' + - name: 'ttl' + type: Integer + description: | + Number of seconds that this ResourceRecordSet can be cached by + resolvers. + - name: 'rrdatas' + type: Array + description: | + As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + item_type: + type: String + - name: 'behavior' + type: String + description: + Answer this query with a behavior rather than DNS data. Acceptable values + are 'behaviorUnspecified', and 'bypassResponsePolicy' + min_version: 'beta' + conflicts: + - local_data diff --git a/mmv1/products/dns/go_product.yaml b/mmv1/products/dns/go_product.yaml new file mode 100644 index 000000000000..602011cf721c --- /dev/null +++ b/mmv1/products/dns/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DNS' +display_name: 'Cloud DNS' +versions: + - name: 'ga' + base_url: 'https://dns.googleapis.com/dns/v1/' + - name: 'beta' + base_url: 'https://dns.googleapis.com/dns/v1beta2/' +scopes: + - 'https://www.googleapis.com/auth/ndev.clouddns.readwrite' diff --git a/mmv1/products/edgecontainer/go_Cluster.yaml b/mmv1/products/edgecontainer/go_Cluster.yaml index 590ccb156e0c..a1eb9b80b36e 100644 --- a/mmv1/products/edgecontainer/go_Cluster.yaml +++ b/mmv1/products/edgecontainer/go_Cluster.yaml @@ -57,10 +57,12 @@ examples: primary_resource_id: 'default' vars: edgecontainer_cluster_name: 'basic-cluster' + skip_vcr: true - name: 'edgecontainer_cluster_with_maintenance_window' primary_resource_id: 'default' vars: edgecontainer_cluster_name: 'cluster-with-maintenance' + skip_vcr: true - name: 'edgecontainer_local_control_plane_cluster' primary_resource_id: 'default' vars: diff --git a/mmv1/products/edgenetwork/go_Network.yaml b/mmv1/products/edgenetwork/go_Network.yaml index 56a1c60b7d5e..f3bf944ddcb1 100644 --- a/mmv1/products/edgenetwork/go_Network.yaml +++ b/mmv1/products/edgenetwork/go_Network.yaml @@ -86,7 +86,7 @@ properties: `projects/{{project}}/locations/{{location}}/zones/{{zone}}/networks/{{network_id}}` output: true - name: 'labels' - type: KeyValuePairs + type: KeyValueLabels description: | Labels associated with this resource. required: false diff --git a/mmv1/products/edgenetwork/go_Subnet.yaml b/mmv1/products/edgenetwork/go_Subnet.yaml index f59a6862db55..68e4c79f534b 100644 --- a/mmv1/products/edgenetwork/go_Subnet.yaml +++ b/mmv1/products/edgenetwork/go_Subnet.yaml @@ -96,7 +96,7 @@ properties: `projects/{{project}}/locations/{{location}}/zones/{{zone}}/subnets/{{subnet_id}}` output: true - name: 'labels' - type: KeyValuePairs + type: KeyValueLabels description: | Labels associated with this resource. 
required: false diff --git a/mmv1/products/firebase/go_Project.yaml b/mmv1/products/firebase/go_Project.yaml index 4b6205e60221..34f4ad712209 100644 --- a/mmv1/products/firebase/go_Project.yaml +++ b/mmv1/products/firebase/go_Project.yaml @@ -22,9 +22,13 @@ description: | min_version: 'beta' references: guides: - 'Official Documentation': 'https://firebase.google.com/' + 'Official Documentation': 'https://firebase.google.com/docs/projects/terraform/get-started' api: 'https://firebase.google.com/docs/reference/firebase-management/rest/v1beta1/projects' docs: + note: | + This resource should usually be used with a provider configuration + with `user_project_override = true` unless you wish for your quota + project to be different from the Firebase project. base_url: 'projects/{{project}}' self_link: 'projects/{{project}}' create_url: 'projects/{{project}}:addFirebase' diff --git a/mmv1/products/firebasehosting/go_Channel.yaml b/mmv1/products/firebasehosting/go_Channel.yaml index f169e29959f3..76c697913574 100644 --- a/mmv1/products/firebasehosting/go_Channel.yaml +++ b/mmv1/products/firebasehosting/go_Channel.yaml @@ -89,7 +89,6 @@ properties: type: KeyValueLabels description: Text labels used for extra metadata and/or filtering min_version: 'beta' - immutable: false - name: 'expireTime' type: Time description: | diff --git a/mmv1/products/firebasehosting/go_CustomDomain.yaml b/mmv1/products/firebasehosting/go_CustomDomain.yaml index e956016663f5..bd23cd30be48 100644 --- a/mmv1/products/firebasehosting/go_CustomDomain.yaml +++ b/mmv1/products/firebasehosting/go_CustomDomain.yaml @@ -86,10 +86,14 @@ examples: site_id: 'site-id' custom_domain: 'run.custom.domain.com' cloud_run_service_id: 'cloud-run-service-via-hosting' + deletion_protection: 'true' test_env_vars: project_id: 'PROJECT_NAME' test_vars_overrides: 'custom_domain': '"run.custom.domain.com"' + 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' virtual_fields: - name: 
'wait_dns_verification' description: | diff --git a/mmv1/products/firebasehosting/go_Version.yaml b/mmv1/products/firebasehosting/go_Version.yaml index a0395f425153..b7896b85c415 100644 --- a/mmv1/products/firebasehosting/go_Version.yaml +++ b/mmv1/products/firebasehosting/go_Version.yaml @@ -59,8 +59,13 @@ examples: vars: site_id: 'site-id' cloud_run_service_id: 'cloud-run-service-via-hosting' + deletion_protection: 'true' test_env_vars: project_id: 'PROJECT_NAME' + test_vars_overrides: + 'deletion_protection': 'false' + ignore_read_extra: + - 'deletion_protection' - name: 'firebasehosting_version_cloud_functions' primary_resource_id: 'default' min_version: 'beta' diff --git a/mmv1/products/firestore/go_Database.yaml b/mmv1/products/firestore/go_Database.yaml index 1ee622691214..7929d3514053 100644 --- a/mmv1/products/firestore/go_Database.yaml +++ b/mmv1/products/firestore/go_Database.yaml @@ -81,7 +81,6 @@ examples: - 'deletion_policy' - name: 'firestore_cmek_database' primary_resource_id: 'database' - min_version: 'beta' vars: database_id: 'cmek-database-id' delete_protection_state: 'DELETE_PROTECTION_ENABLED' @@ -119,7 +118,6 @@ examples: - 'deletion_policy' - name: 'firestore_cmek_database_in_datastore_mode' primary_resource_id: 'database' - min_version: 'beta' vars: database_id: 'cmek-database-id' delete_protection_state: 'DELETE_PROTECTION_ENABLED' @@ -267,7 +265,6 @@ properties: The CMEK (Customer Managed Encryption Key) configuration for a Firestore database. If not present, the database is secured by the default Google encryption key. - min_version: 'beta' immutable: true properties: - name: 'kmsKeyName' diff --git a/mmv1/products/firestore/go_Document.yaml b/mmv1/products/firestore/go_Document.yaml index ab9348f8cd15..e5cb276ef27d 100644 --- a/mmv1/products/firestore/go_Document.yaml +++ b/mmv1/products/firestore/go_Document.yaml @@ -68,6 +68,7 @@ parameters: description: | The Firestore database id. Defaults to `"(default)"`. 
url_param_only: true + immutable: true default_value: "(default)" - name: 'collection' type: String @@ -75,12 +76,14 @@ parameters: The collection ID, relative to database. For example: chatrooms or chatrooms/my-document/private-messages. url_param_only: true required: true + immutable: true - name: 'documentId' type: String description: | The client-assigned document ID to use for this document during creation. url_param_only: true required: true + immutable: true properties: - name: 'name' type: String diff --git a/mmv1/products/firestore/go_Field.yaml b/mmv1/products/firestore/go_Field.yaml index c88dc793135b..171480940b36 100644 --- a/mmv1/products/firestore/go_Field.yaml +++ b/mmv1/products/firestore/go_Field.yaml @@ -99,6 +99,7 @@ properties: description: | The Firestore database id. Defaults to `"(default)"`. url_param_only: true + immutable: true default_value: "(default)" - name: 'collection' type: String @@ -106,12 +107,14 @@ properties: The id of the collection group to configure. url_param_only: true required: true + immutable: true - name: 'field' type: String description: | The id of the field to configure. url_param_only: true required: true + immutable: true - name: 'name' type: String description: | diff --git a/mmv1/products/firestore/go_Index.yaml b/mmv1/products/firestore/go_Index.yaml index 23b81b12157a..917271fadca3 100644 --- a/mmv1/products/firestore/go_Index.yaml +++ b/mmv1/products/firestore/go_Index.yaml @@ -16,8 +16,9 @@ name: 'Index' description: | Cloud Firestore indexes enable simple and complex queries against documents in a database. - This resource manages composite indexes and not single field indexes. Both Firestore Native and Datastore Mode indexes are supported. + This resource manages composite indexes and not single field indexes. + To manage single field indexes, use the `google_firestore_field` resource instead. 
references: guides: 'Official Documentation': 'https://cloud.google.com/firestore/docs/query-data/indexing' diff --git a/mmv1/products/gkebackup/go_BackupPlan.yaml b/mmv1/products/gkebackup/go_BackupPlan.yaml index 7152caa839ba..7c1680696e21 100644 --- a/mmv1/products/gkebackup/go_BackupPlan.yaml +++ b/mmv1/products/gkebackup/go_BackupPlan.yaml @@ -227,7 +227,6 @@ properties: Description: A set of custom labels supplied by the user. A list of key->value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'backupSchedule' type: NestedObject description: diff --git a/mmv1/products/gkebackup/go_RestorePlan.yaml b/mmv1/products/gkebackup/go_RestorePlan.yaml index 0543e1e1e780..e48312f48c16 100644 --- a/mmv1/products/gkebackup/go_RestorePlan.yaml +++ b/mmv1/products/gkebackup/go_RestorePlan.yaml @@ -203,7 +203,6 @@ properties: Description: A set of custom labels supplied by the user. A list of key->value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'backupPlan' type: String description: | diff --git a/mmv1/products/gkehub/go_Membership.yaml b/mmv1/products/gkehub/go_Membership.yaml index 2bf2f8bf08b0..e3a922145bd0 100644 --- a/mmv1/products/gkehub/go_Membership.yaml +++ b/mmv1/products/gkehub/go_Membership.yaml @@ -135,7 +135,6 @@ properties: type: KeyValueLabels description: | Labels to apply to this membership. - immutable: false - name: 'endpoint' type: NestedObject description: | diff --git a/mmv1/products/gkehub2/go_Feature.yaml b/mmv1/products/gkehub2/go_Feature.yaml index d0b5e2442fe2..cbeb756f835b 100644 --- a/mmv1/products/gkehub2/go_Feature.yaml +++ b/mmv1/products/gkehub2/go_Feature.yaml @@ -127,7 +127,6 @@ properties: - name: 'labels' type: KeyValueLabels description: GCP labels for this Feature. - immutable: false - name: 'resourceState' type: NestedObject description: State of the Feature resource itself. 
@@ -271,6 +270,13 @@ properties: - name: 'version' type: String description: 'Version of ACM installed' + - name: 'management' + type: Enum + description: 'Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades.' + enum_values: + - 'MANAGEMENT_UNSPECIFIED' + - 'MANAGEMENT_AUTOMATIC' + - 'MANAGEMENT_MANUAL' - name: 'configSync' type: NestedObject description: 'ConfigSync configuration for the cluster' diff --git a/mmv1/products/gkehub2/go_MembershipBinding.yaml b/mmv1/products/gkehub2/go_MembershipBinding.yaml index fd8357ff67b8..8e40fba367fb 100644 --- a/mmv1/products/gkehub2/go_MembershipBinding.yaml +++ b/mmv1/products/gkehub2/go_MembershipBinding.yaml @@ -145,4 +145,3 @@ properties: type: KeyValueLabels description: | Labels for this Membership binding. - immutable: false diff --git a/mmv1/products/gkehub2/go_Namespace.yaml b/mmv1/products/gkehub2/go_Namespace.yaml index a5dbd58f4695..b17ceca262b9 100644 --- a/mmv1/products/gkehub2/go_Namespace.yaml +++ b/mmv1/products/gkehub2/go_Namespace.yaml @@ -138,4 +138,3 @@ properties: type: KeyValueLabels description: | Labels for this Namespace. - immutable: false diff --git a/mmv1/products/gkehub2/go_Scope.yaml b/mmv1/products/gkehub2/go_Scope.yaml index e273cf9f2e13..163310c36ac6 100644 --- a/mmv1/products/gkehub2/go_Scope.yaml +++ b/mmv1/products/gkehub2/go_Scope.yaml @@ -129,4 +129,3 @@ properties: type: KeyValueLabels description: | Labels for this Scope. - immutable: false diff --git a/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml b/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml index a3a64ce804b1..f58282dea62c 100644 --- a/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml +++ b/mmv1/products/gkehub2/go_ScopeRBACRoleBinding.yaml @@ -153,4 +153,3 @@ properties: type: KeyValueLabels description: | Labels for this ScopeRBACRoleBinding. 
- immutable: false diff --git a/mmv1/products/iap/go_AppEngineService.yaml b/mmv1/products/iap/go_AppEngineService.yaml index c02f70a3bf34..07dcbd26c22c 100644 --- a/mmv1/products/iap/go_AppEngineService.yaml +++ b/mmv1/products/iap/go_AppEngineService.yaml @@ -46,6 +46,7 @@ examples: test_env_vars: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' + skip_test: true parameters: properties: - name: 'appId' diff --git a/mmv1/products/integrations/go_Client.yaml b/mmv1/products/integrations/go_Client.yaml index 8adff75c092c..b926660903b7 100644 --- a/mmv1/products/integrations/go_Client.yaml +++ b/mmv1/products/integrations/go_Client.yaml @@ -45,11 +45,8 @@ examples: primary_resource_id: 'example' vars: key_ring_name: 'my-keyring' - service_account_id: my-service-acc + service_account_id: 'service-acc' skip_vcr: true - - name: 'integrations_client_deprecated_fields' - primary_resource_id: 'example' - skip_docs: true parameters: - name: 'location' type: String @@ -65,8 +62,6 @@ properties: Cloud KMS config for AuthModule to encrypt/decrypt credentials. immutable: true ignore_read: true - conflicts: - - provision_gmek properties: - name: 'kmsLocation' type: String @@ -101,32 +96,12 @@ properties: the kms key is stored at the same project as customer's project and ecrypted with CMEK, otherwise, the kms key is stored in the tenant project and encrypted with GMEK. - - name: 'createSampleWorkflows' - type: Boolean - description: | - Indicates if sample workflow should be created along with provisioning. - immutable: true - ignore_read: true - conflicts: - - create_sample_integrations - deprecation_message: '`create_sample_workflows` is deprecated and will be removed in a future major release. Use `create_sample_integrations` instead.' - name: 'createSampleIntegrations' type: Boolean description: | Indicates if sample integrations should be created along with provisioning. 
immutable: true ignore_read: true - conflicts: - - create_sample_workflows - - name: 'provisionGmek' - type: Boolean - description: | - Indicates provision with GMEK or CMEK. - immutable: true - ignore_read: true - conflicts: - - cloud_kms_config - deprecation_message: '`provision_gmek` is deprecated and will be removed in a future major release. Client would be provisioned as gmek if `cloud_kms_config` is not given.' - name: 'runAsServiceAccount' type: String description: | diff --git a/mmv1/products/kms/go_AutokeyConfig.yaml b/mmv1/products/kms/go_AutokeyConfig.yaml index bc47bd075d71..a7445c3ef986 100644 --- a/mmv1/products/kms/go_AutokeyConfig.yaml +++ b/mmv1/products/kms/go_AutokeyConfig.yaml @@ -56,6 +56,7 @@ examples: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' external_providers: ["random", "time"] + skip_vcr: true parameters: - name: 'folder' type: String diff --git a/mmv1/products/kms/go_EkmConnection.yaml b/mmv1/products/kms/go_EkmConnection.yaml index 560cfd6dad5e..49959e3e0779 100644 --- a/mmv1/products/kms/go_EkmConnection.yaml +++ b/mmv1/products/kms/go_EkmConnection.yaml @@ -38,6 +38,13 @@ timeouts: insert_minutes: 20 update_minutes: 20 delete_minutes: 20 +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'name' + iam_conditions_request_type: 'QUERY_PARAM_NESTED' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/ekmConnections/{{name}}' custom_code: examples: - name: 'kms_ekm_connection_basic' diff --git a/mmv1/products/kms/go_KeyHandle.yaml b/mmv1/products/kms/go_KeyHandle.yaml index 087ce9e1c5f3..3b1a8b113735 100644 --- a/mmv1/products/kms/go_KeyHandle.yaml +++ b/mmv1/products/kms/go_KeyHandle.yaml @@ -60,6 +60,7 @@ examples: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' external_providers: ["random", "time"] + skip_vcr: true parameters: - name: 'location' type: String @@ -89,7 +90,7 @@ properties: type: String description: | 
Selector of the resource type where we want to protect resources. - For example, `storage.googleapis.com/Bucket OR compute.googleapis.com/*` + For example, `storage.googleapis.com/Bucket`. min_version: 'beta' required: true immutable: true diff --git a/mmv1/products/logging/go_LogView.yaml b/mmv1/products/logging/go_LogView.yaml index 46e491e1f8ff..6d78e73c57fe 100644 --- a/mmv1/products/logging/go_LogView.yaml +++ b/mmv1/products/logging/go_LogView.yaml @@ -42,7 +42,6 @@ iam_policy: import_format: - '{{%parent}}/locations/{{location}}/buckets/{{bucket}}/views/{{name}}' - '{{name}}' - min_version: 'beta' custom_code: encoder: 'templates/terraform/encoders/go/logging_log_view.go.tmpl' pre_read: 'templates/terraform/pre_read/go/logging_log_view.go.tmpl' diff --git a/mmv1/products/managedkafka/go_Cluster.yaml b/mmv1/products/managedkafka/go_Cluster.yaml index 06921d6ac7e3..2b9840f4124a 100644 --- a/mmv1/products/managedkafka/go_Cluster.yaml +++ b/mmv1/products/managedkafka/go_Cluster.yaml @@ -95,9 +95,10 @@ properties: properties: - name: 'networkConfigs' type: Array - description: "Virtual Private Cloud (VPC) networks that must be granted - direct access to the Kafka cluster. Minimum of 1 network is required. Maximum - of 10 networks can be specified." + description: "Virtual Private Cloud (VPC) subnets where IP addresses for the Kafka + cluster are allocated. To make the cluster available in a VPC, you must specify at least + one subnet per network. You must specify between 1 and 10 subnets. + Additional subnets may be specified with additional `network_configs` blocks." min_version: 'beta' required: true item_type: @@ -108,8 +109,7 @@ properties: description: "Name of the VPC subnet from which the cluster is accessible. Both broker and bootstrap server IP addresses and DNS entries are automatically created in the subnet. The subnet must be located in the same region as the - cluster. The project may differ. A minimum of 1 subnet is required. 
- A maximum of 10 subnets can be specified. The name of the subnet must be + cluster. The project may differ. The name of the subnet must be in the format `projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET`." min_version: 'beta' required: true diff --git a/mmv1/products/networkmanagement/go_ConnectivityTest.yaml b/mmv1/products/networkmanagement/go_ConnectivityTest.yaml index 0ba2624ae497..e5829588efe3 100644 --- a/mmv1/products/networkmanagement/go_ConnectivityTest.yaml +++ b/mmv1/products/networkmanagement/go_ConnectivityTest.yaml @@ -228,4 +228,3 @@ properties: type: KeyValueLabels description: | Resource labels to represent user-provided metadata. - immutable: false diff --git a/mmv1/products/networksecurity/go_AddressGroup.yaml b/mmv1/products/networksecurity/go_AddressGroup.yaml index 4de5daa3b6fb..0b9066b233d0 100644 --- a/mmv1/products/networksecurity/go_AddressGroup.yaml +++ b/mmv1/products/networksecurity/go_AddressGroup.yaml @@ -122,7 +122,6 @@ properties: description: | Set of label tags associated with the AddressGroup resource. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false - name: 'type' type: Enum description: | diff --git a/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml b/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml index cb1ab4cb991e..c1d962dd3f91 100644 --- a/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml +++ b/mmv1/products/networksecurity/go_AuthorizationPolicy.yaml @@ -96,7 +96,6 @@ properties: description: Set of label tags associated with the AuthorizationPolicy resource. 
min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml b/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml index d35529ee780f..5166caf08361 100644 --- a/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml +++ b/mmv1/products/networksecurity/go_ClientTlsPolicy.yaml @@ -16,7 +16,6 @@ name: 'ClientTlsPolicy' description: | ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. -min_version: 'beta' references: guides: 'Service Security': 'https://cloud.google.com/traffic-director/docs/security-use-cases' @@ -54,20 +53,19 @@ custom_code: examples: - name: 'network_security_client_tls_policy_basic' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-client-tls-policy' + skip_vcr: true - name: 'network_security_client_tls_policy_advanced' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-client-tls-policy' + skip_vcr: true parameters: - name: 'name' type: String description: | Name of the ClientTlsPolicy resource. - min_version: 'beta' url_param_only: true required: true immutable: true @@ -76,7 +74,6 @@ parameters: description: | The location of the client tls policy. The default value is `global`. - min_version: 'beta' url_param_only: true default_value: "global" properties: @@ -84,40 +81,32 @@ properties: type: Time description: | Time the ClientTlsPolicy was created in UTC. - min_version: 'beta' output: true - name: 'updateTime' type: Time description: | Time the ClientTlsPolicy was updated in UTC. - min_version: 'beta' output: true - name: 'labels' type: KeyValueLabels description: Set of label tags associated with the ClientTlsPolicy resource. 
- min_version: 'beta' - immutable: false - name: 'description' type: String description: | A free-text description of the resource. Max length 1024 characters. - min_version: 'beta' - name: 'sni' type: String description: | Server Name Indication string to present to the server during TLS handshake. E.g: "secure.example.com". - min_version: 'beta' - name: 'clientCertificate' type: NestedObject description: | Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS. - min_version: 'beta' properties: - name: 'grpcEndpoint' type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -126,13 +115,11 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -141,13 +128,11 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. - min_version: 'beta' required: true - name: 'serverValidationCa' type: Array description: | Defines the mechanism to obtain the Certificate Authority certificate to validate the server certificate. If empty, client does not validate the server certificate. 
- min_version: 'beta' item_type: type: NestedObject properties: @@ -155,7 +140,6 @@ properties: type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -164,13 +148,11 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -179,5 +161,4 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. - min_version: 'beta' required: true diff --git a/mmv1/products/networksecurity/go_FirewallEndpoint.yaml b/mmv1/products/networksecurity/go_FirewallEndpoint.yaml index b69011e44eaf..b6373f851347 100644 --- a/mmv1/products/networksecurity/go_FirewallEndpoint.yaml +++ b/mmv1/products/networksecurity/go_FirewallEndpoint.yaml @@ -88,7 +88,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. 
- immutable: false - name: 'selfLink' type: String description: | diff --git a/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml b/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml index efac035cab7d..6d1a4651ad0a 100644 --- a/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml +++ b/mmv1/products/networksecurity/go_FirewallEndpointAssociation.yaml @@ -25,6 +25,12 @@ references: 'Create and associate firewall endpoints': 'https://cloud.google.com/firewall/docs/configure-firewall-endpoints' api: 'https://cloud.google.com/firewall/docs/reference/network-security/rest/v1/projects.locations.firewallEndpointAssociations#FirewallEndpointAssociation' docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project_id` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. + Your account must have the `serviceusage.services.use` permission on the + `billing_project_id` you defined. base_url: '{{parent}}/locations/{{location}}/firewallEndpointAssociations' self_link: '{{parent}}/locations/{{location}}/firewallEndpointAssociations/{{name}}' create_url: '{{parent}}/locations/{{location}}/firewallEndpointAssociations?firewallEndpointAssociationId={{name}}' @@ -95,7 +101,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. - immutable: false - name: 'disabled' type: Boolean description: | diff --git a/mmv1/products/networksecurity/go_SecurityProfile.yaml b/mmv1/products/networksecurity/go_SecurityProfile.yaml index 83ccbe758f7f..3d619d1cac58 100644 --- a/mmv1/products/networksecurity/go_SecurityProfile.yaml +++ b/mmv1/products/networksecurity/go_SecurityProfile.yaml @@ -106,7 +106,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. 
- immutable: false - name: 'threatPreventionProfile' type: NestedObject description: The threat prevention configuration for the security profile. diff --git a/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml b/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml index 1f82e44de541..0c8f14064c81 100644 --- a/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml +++ b/mmv1/products/networksecurity/go_SecurityProfileGroup.yaml @@ -97,7 +97,6 @@ properties: type: KeyValueLabels description: | A map of key/value label pairs to assign to the resource. - immutable: false - name: 'threatPreventionProfile' type: String description: | diff --git a/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml b/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml index bb132e3ff597..c3e48c7bd62b 100644 --- a/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml +++ b/mmv1/products/networksecurity/go_ServerTlsPolicy.yaml @@ -16,7 +16,6 @@ name: 'ServerTlsPolicy' description: | ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. 
-min_version: 'beta' references: guides: api: 'https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies' @@ -53,22 +52,18 @@ custom_code: examples: - name: 'network_security_server_tls_policy_basic' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' - name: 'network_security_server_tls_policy_advanced' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' - name: 'network_security_server_tls_policy_server_cert' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' - name: 'network_security_server_tls_policy_mtls' primary_resource_id: 'default' - min_version: 'beta' vars: resource_name: 'my-server-tls-policy' trust_config_name: 'my-trust-config' @@ -77,7 +72,6 @@ parameters: type: String description: | Name of the ServerTlsPolicy resource. - min_version: 'beta' url_param_only: true required: true immutable: true @@ -86,7 +80,6 @@ parameters: description: | The location of the server tls policy. The default value is `global`. - min_version: 'beta' url_param_only: true default_value: "global" properties: @@ -94,42 +87,34 @@ properties: type: Time description: | Time the ServerTlsPolicy was created in UTC. - min_version: 'beta' output: true - name: 'updateTime' type: Time description: | Time the ServerTlsPolicy was updated in UTC. - min_version: 'beta' output: true - name: 'labels' type: KeyValueLabels description: Set of label tags associated with the ServerTlsPolicy resource. - min_version: 'beta' - immutable: false - name: 'description' type: String description: | A free-text description of the resource. Max length 1024 characters. - min_version: 'beta' - name: 'allowOpen' type: Boolean description: | This field applies only for Traffic Director policies. It is must be set to false for external HTTPS load balancer policies. 
Determines if server allows plaintext connections. If set to true, server allows plain text connections. By default, it is set to false. This setting is not exclusive of other encryption modes. For example, if allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. See documentation of other encryption modes to confirm compatibility. Consider using it if you wish to upgrade in place your deployment to TLS while having mixed TLS and non-TLS traffic reaching port :80. - min_version: 'beta' - name: 'serverCertificate' type: NestedObject description: | Defines a mechanism to provision client identity (public and private keys) for peer to peer authentication. The presence of this dictates mTLS. - min_version: 'beta' properties: - name: 'grpcEndpoint' type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -138,14 +123,12 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -154,21 +137,18 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. 
- min_version: 'beta' required: true - name: 'mtlsPolicy' type: NestedObject description: | This field is required if the policy is used with external HTTPS load balancers. This field can be empty for Traffic Director. Defines a mechanism to provision peer validation certificates for peer to peer authentication (Mutual TLS - mTLS). If not specified, client certificate will not be requested. The connection is treated as TLS and not mTLS. If allowOpen and mtlsPolicy are set, server allows both plain text and mTLS connections. - min_version: 'beta' properties: - name: 'clientValidationMode' type: Enum description: | When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled. Required if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty. - min_version: 'beta' immutable: true enum_values: - 'CLIENT_VALIDATION_MODE_UNSPECIFIED' @@ -180,14 +160,12 @@ properties: Reference to the TrustConfig from certificatemanager.googleapis.com namespace. If specified, the chain validation will be performed against certificates configured in the given TrustConfig. Allowed only if the policy is to be used with external HTTPS load balancers. - min_version: 'beta' immutable: true - name: 'clientValidationCa' type: Array description: | Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty. Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate. - min_version: 'beta' item_type: type: NestedObject properties: @@ -195,7 +173,6 @@ properties: type: NestedObject description: | gRPC specific configuration to access the gRPC server to obtain the cert and private key. 
- min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -204,14 +181,12 @@ properties: type: String description: | The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:". - min_version: 'beta' required: true - name: 'certificateProviderInstance' type: NestedObject description: | Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty. Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported. - min_version: 'beta' exactly_one_of: - 'grpc_endpoint' - 'certificate_provider_instance' @@ -220,5 +195,4 @@ properties: type: String description: | Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance. - min_version: 'beta' required: true diff --git a/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml b/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml index c93282014844..0c1b45847550 100644 --- a/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml +++ b/mmv1/products/networkservices/go_EdgeCacheKeyset.yaml @@ -81,7 +81,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the EdgeCache resource.' - immutable: false - name: 'public_key' type: Array description: | diff --git a/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml b/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml index 51ce9f3f1c64..2a759fa8057f 100644 --- a/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml +++ b/mmv1/products/networkservices/go_EdgeCacheOrigin.yaml @@ -89,7 +89,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the EdgeCache resource.' 
- immutable: false - name: 'originAddress' type: String description: | diff --git a/mmv1/products/networkservices/go_EdgeCacheService.yaml b/mmv1/products/networkservices/go_EdgeCacheService.yaml index f0a34752a429..86ccaf52a816 100644 --- a/mmv1/products/networkservices/go_EdgeCacheService.yaml +++ b/mmv1/products/networkservices/go_EdgeCacheService.yaml @@ -88,7 +88,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of label tags associated with the EdgeCache resource.' - immutable: false - name: 'disableQuic' type: Boolean description: | diff --git a/mmv1/products/networkservices/go_EndpointPolicy.yaml b/mmv1/products/networkservices/go_EndpointPolicy.yaml index 76ad3250c77f..e0ade9e8be66 100644 --- a/mmv1/products/networkservices/go_EndpointPolicy.yaml +++ b/mmv1/products/networkservices/go_EndpointPolicy.yaml @@ -86,7 +86,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the TcpRoute resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_Gateway.yaml b/mmv1/products/networkservices/go_Gateway.yaml index 674909a93385..c87d7974085f 100644 --- a/mmv1/products/networkservices/go_Gateway.yaml +++ b/mmv1/products/networkservices/go_Gateway.yaml @@ -133,7 +133,6 @@ properties: - name: 'labels' type: KeyValueLabels description: Set of label tags associated with the Gateway resource. - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_GrpcRoute.yaml b/mmv1/products/networkservices/go_GrpcRoute.yaml index 51fba4ae1b0f..30d530c95de3 100644 --- a/mmv1/products/networkservices/go_GrpcRoute.yaml +++ b/mmv1/products/networkservices/go_GrpcRoute.yaml @@ -99,7 +99,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the GrpcRoute resource. 
min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_HttpRoute.yaml b/mmv1/products/networkservices/go_HttpRoute.yaml index 545e3ae54f1e..410ab0b85aa8 100644 --- a/mmv1/products/networkservices/go_HttpRoute.yaml +++ b/mmv1/products/networkservices/go_HttpRoute.yaml @@ -106,7 +106,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the HttpRoute resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_LbRouteExtension.yaml b/mmv1/products/networkservices/go_LbRouteExtension.yaml index 3a2b46bde77f..294e3736de7c 100644 --- a/mmv1/products/networkservices/go_LbRouteExtension.yaml +++ b/mmv1/products/networkservices/go_LbRouteExtension.yaml @@ -87,7 +87,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of labels associated with the LbRouteExtension resource.' - immutable: false - name: 'forwardingRules' type: Array description: | diff --git a/mmv1/products/networkservices/go_LbTrafficExtension.yaml b/mmv1/products/networkservices/go_LbTrafficExtension.yaml index ebb94b424d09..99901353dc7f 100644 --- a/mmv1/products/networkservices/go_LbTrafficExtension.yaml +++ b/mmv1/products/networkservices/go_LbTrafficExtension.yaml @@ -84,7 +84,6 @@ properties: - name: 'labels' type: KeyValueLabels description: 'Set of labels associated with the LbTrafficExtension resource.' - immutable: false - name: 'forwardingRules' type: Array description: | diff --git a/mmv1/products/networkservices/go_Mesh.yaml b/mmv1/products/networkservices/go_Mesh.yaml index 123bb9579487..d6780f3dabdc 100644 --- a/mmv1/products/networkservices/go_Mesh.yaml +++ b/mmv1/products/networkservices/go_Mesh.yaml @@ -95,7 +95,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the Mesh resource. 
min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_ServiceBinding.yaml b/mmv1/products/networkservices/go_ServiceBinding.yaml index 04b7668a5e1b..b492fc1d6de4 100644 --- a/mmv1/products/networkservices/go_ServiceBinding.yaml +++ b/mmv1/products/networkservices/go_ServiceBinding.yaml @@ -85,7 +85,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the ServiceBinding resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_ServiceLbPolicies.yaml b/mmv1/products/networkservices/go_ServiceLbPolicies.yaml index 4612b13b81bd..c60a511386f5 100644 --- a/mmv1/products/networkservices/go_ServiceLbPolicies.yaml +++ b/mmv1/products/networkservices/go_ServiceLbPolicies.yaml @@ -93,7 +93,6 @@ properties: type: KeyValueLabels description: 'Set of label tags associated with the ServiceLbPolicy resource.' min_version: 'beta' - immutable: false - name: 'description' type: String description: | diff --git a/mmv1/products/networkservices/go_TcpRoute.yaml b/mmv1/products/networkservices/go_TcpRoute.yaml index 9367d8556315..32dd2ae2d0a3 100644 --- a/mmv1/products/networkservices/go_TcpRoute.yaml +++ b/mmv1/products/networkservices/go_TcpRoute.yaml @@ -113,7 +113,6 @@ properties: type: KeyValueLabels description: Set of label tags associated with the TcpRoute resource. min_version: 'beta' - immutable: false - name: 'description' type: String description: | @@ -205,3 +204,12 @@ properties: description: | If true, Router will use the destination IP and port of the original connection as the destination of the request. min_version: 'beta' + - name: 'idleTimeout' + type: String + description: | + Specifies the idle timeout for the selected route. The idle timeout is defined as the period in which there are no bytes sent or received on either the upstream or downstream connection. 
If not set, the default idle timeout is 30 seconds. If set to 0s, the timeout will be disabled. + + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + min_version: 'beta' + validation: + regex: '^(0|[1-9][0-9]*)(\.[0-9]{1,9})?s$' diff --git a/mmv1/products/notebooks/go_Location.yaml b/mmv1/products/notebooks/go_Location.yaml index b99bc2d342ce..6513176f1f50 100644 --- a/mmv1/products/notebooks/go_Location.yaml +++ b/mmv1/products/notebooks/go_Location.yaml @@ -44,5 +44,4 @@ properties: - name: 'name' type: String description: 'Name of the Location resource.' - custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' diff --git a/mmv1/products/parallelstore/Instance.yaml b/mmv1/products/parallelstore/Instance.yaml index 881bb5b5790a..9ed5aa2d1cdc 100644 --- a/mmv1/products/parallelstore/Instance.yaml +++ b/mmv1/products/parallelstore/Instance.yaml @@ -127,8 +127,8 @@ properties: characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: `name + \"_\" + value` would prove problematic if we were to - allow `\"_\"` in a future release. " + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " - !ruby/object:Api::Type::String name: capacityGib description: | diff --git a/mmv1/products/parallelstore/go_Instance.yaml b/mmv1/products/parallelstore/go_Instance.yaml index d066bb7131d0..fa39d556b128 100644 --- a/mmv1/products/parallelstore/go_Instance.yaml +++ b/mmv1/products/parallelstore/go_Instance.yaml @@ -134,8 +134,8 @@ properties: characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. 
For example, representing labels - as the string: `name + \"_\" + value` would prove problematic if we were to - allow `\"_\"` in a future release. " + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " min_version: 'beta' - name: 'capacityGib' type: String diff --git a/mmv1/products/privateca/go_CaPool.yaml b/mmv1/products/privateca/go_CaPool.yaml index 2d0292a2dcb0..a257a9e7bfac 100644 --- a/mmv1/products/privateca/go_CaPool.yaml +++ b/mmv1/products/privateca/go_CaPool.yaml @@ -502,4 +502,3 @@ properties: An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false diff --git a/mmv1/products/privateca/go_Certificate.yaml b/mmv1/products/privateca/go_Certificate.yaml index d7d449927a45..bc46f211cc2f 100644 --- a/mmv1/products/privateca/go_Certificate.yaml +++ b/mmv1/products/privateca/go_Certificate.yaml @@ -672,7 +672,6 @@ properties: type: KeyValueLabels description: | Labels with user-defined metadata to apply to this resource. - immutable: false - name: 'pemCsr' type: String description: | diff --git a/mmv1/products/privateca/go_CertificateAuthority.yaml b/mmv1/products/privateca/go_CertificateAuthority.yaml index 18efff479357..8bd6c9a68729 100644 --- a/mmv1/products/privateca/go_CertificateAuthority.yaml +++ b/mmv1/products/privateca/go_CertificateAuthority.yaml @@ -783,4 +783,3 @@ properties: An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. - immutable: false diff --git a/mmv1/products/privateca/go_CertificateTemplate.yaml b/mmv1/products/privateca/go_CertificateTemplate.yaml index b5a0a7022949..c54e6dab962c 100644 --- a/mmv1/products/privateca/go_CertificateTemplate.yaml +++ b/mmv1/products/privateca/go_CertificateTemplate.yaml @@ -303,4 +303,3 @@ properties: - name: 'labels' type: KeyValueLabels description: Optional. Labels with user-defined metadata. 
- immutable: false diff --git a/mmv1/products/pubsub/go_Schema.yaml b/mmv1/products/pubsub/go_Schema.yaml index c16b3ab2f43b..068a568afaa2 100644 --- a/mmv1/products/pubsub/go_Schema.yaml +++ b/mmv1/products/pubsub/go_Schema.yaml @@ -41,6 +41,7 @@ async: iam_policy: method_name_separator: ':' parent_resource_attribute: 'schema' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: update_encoder: 'templates/terraform/update_encoder/go/pubsub_schema.tmpl' examples: diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 260e1a733403..92bc7d775487 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -127,7 +127,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Subscription. - immutable: false - name: 'bigqueryConfig' type: NestedObject description: | @@ -213,6 +212,10 @@ properties: description: | The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. The maxBytes limit may be exceeded in cases where messages are larger than the limit. + - name: 'maxMessages' + type: Integer + description: | + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. - name: 'state' type: Enum description: | @@ -231,6 +234,10 @@ properties: type: Boolean description: | When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. + - name: 'useTopicSchema' + type: Boolean + description: | + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. 
- name: 'serviceAccountEmail' type: String description: | @@ -387,8 +394,6 @@ properties: diff_suppress_func: 'comparePubsubSubscriptionExpirationPolicy' - name: 'filter' type: String - validation: !ruby/object:Provider::Terraform::Validation - function: 'verify.ValidateRegexp(`^.{1,256}$`)' description: | The subscription only delivers the messages that match the filter. Pub/Sub automatically acknowledges the messages that don't match the filter. You can filter messages @@ -396,6 +401,8 @@ properties: you can't modify the filter. required: false immutable: true + validation: + regex: '^.{0,256}$' - name: 'deadLetterPolicy' type: NestedObject description: | diff --git a/mmv1/products/pubsub/go_Topic.yaml b/mmv1/products/pubsub/go_Topic.yaml index 807462055e78..87b6d457b0a8 100644 --- a/mmv1/products/pubsub/go_Topic.yaml +++ b/mmv1/products/pubsub/go_Topic.yaml @@ -49,6 +49,7 @@ async: iam_policy: method_name_separator: ':' parent_resource_attribute: 'topic' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: encoder: 'templates/terraform/encoders/go/no_send_name.go.tmpl' update_encoder: 'templates/terraform/update_encoder/go/pubsub_topic.tmpl' @@ -105,7 +106,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Topic. - immutable: false - name: 'messageStoragePolicy' type: NestedObject description: | @@ -130,7 +130,6 @@ properties: type: NestedObject description: | Settings for validating messages published against a schema. - default_from_api: true properties: - name: 'schema' type: String diff --git a/mmv1/products/redis/go_Cluster.yaml b/mmv1/products/redis/go_Cluster.yaml index 5e643219507a..f9bb64b701a8 100644 --- a/mmv1/products/redis/go_Cluster.yaml +++ b/mmv1/products/redis/go_Cluster.yaml @@ -283,3 +283,122 @@ properties: Configure Redis Cluster behavior using a subset of native Redis configuration parameters. 
Please check Memorystore documentation for the list of supported parameters: https://cloud.google.com/memorystore/docs/cluster/supported-instance-configurations + - name: 'maintenancePolicy' + type: NestedObject + description: Maintenance policy for a cluster + properties: + - name: 'createTime' + type: String + description: | + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'weeklyMaintenanceWindow' + type: Array + description: | + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + item_type: + type: NestedObject + properties: + - name: 'day' + type: Enum + description: | + Required. The day of week that maintenance updates occur. + + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + required: true + enum_values: + - 'DAY_OF_WEEK_UNSPECIFIED' + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + - name: 'duration' + type: String + description: | + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + output: true + - name: 'startTime' + type: NestedObject + description: | + Required. Start time of the window in UTC time. 
+ required: true + send_empty_value: true + allow_empty_object: true + properties: + - name: 'hours' + type: Integer + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + validation: + function: 'validation.IntBetween(0,23)' + - name: 'minutes' + type: Integer + description: | + Minutes of hour of day. Must be from 0 to 59. + validation: + function: 'validation.IntBetween(0,59)' + - name: 'seconds' + type: Integer + description: | + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + validation: + function: 'validation.IntBetween(0,60)' + - name: 'nanos' + type: Integer + description: | + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + validation: + function: 'validation.IntBetween(0,999999999)' + - name: 'maintenanceSchedule' + type: NestedObject + description: Upcoming maintenance schedule. + output: true + properties: + - name: 'startTime' + type: String + description: | + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'endTime' + type: String + description: | + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + output: true + - name: 'scheduleDeadlineTime' + type: String + description: | + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. 
+ output: true diff --git a/mmv1/products/securitycenter/go_FolderNotificationConfig.yaml b/mmv1/products/securitycenter/go_FolderNotificationConfig.yaml new file mode 100644 index 000000000000..c5eed19c8747 --- /dev/null +++ b/mmv1/products/securitycenter/go_FolderNotificationConfig.yaml @@ -0,0 +1,130 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v1/folders.notificationConfigs' +docs: +base_url: 'folders/{{folder}}/notificationConfigs' +self_link: 'folders/{{folder}}/notificationConfigs/{{config_id}}' +create_url: 'folders/{{folder}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/notificationConfigs/{{config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_import: 'templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl' +examples: + - name: 'scc_folder_notification_config_basic' + primary_resource_id: 'custom_notification_config' + vars: + folder_display_name: 'folder-name' + config_id: 'my-config' + topic_name: 'my-topic' + test_env_vars: + org_id: 'ORG_ID' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + Numerical ID of the parent folder. + url_param_only: true + required: true + immutable: true + - name: 'configId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `folders/{{folder}}/notificationConfigs/{{config_id}}`. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". 
+ required: true + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + required: true diff --git a/mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml b/mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml new file mode 100644 index 000000000000..aab5912d34a3 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_FolderMuteConfig.yaml @@ -0,0 +1,118 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderMuteConfig' +description: | + Mute Findings is a volume management feature in Security Command Center + that lets you manually or programmatically hide irrelevant findings, + and create filters to automatically silence existing and future + findings based on criteria you specify. +references: + guides: + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.muteConfigs' +docs: +base_url: 'folders/{{folder}}/locations/{{location}}/muteConfigs' +self_link: 'folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +create_url: 'folders/{{folder}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_folder_mute_config_basic' + primary_resource_id: 'default' + vars: + mute_config_id: 'my-config' + folder_display_name: 'folder-name' + test_env_vars: + org_id: 'ORG_ID' + test_vars_overrides: + 'sleep': 'true' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + The folder whose Cloud Security Command Center the Mute + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by folder. 
If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" + - name: 'mute_config_id' + type: String + description: | + Unique identifier provided by the client within the parent scope. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the mute config. Its format is + organizations/{organization}/locations/global/muteConfigs/{configId}, + folders/{folder}/locations/global/muteConfigs/{configId}, + or projects/{project}/locations/global/muteConfigs/{configId} + output: true + - name: 'description' + type: String + description: A description of the mute config. + - name: 'filter' + type: String + description: | + An expression that defines the filter to apply across create/update + events of findings. While creating a filter string, be mindful of + the scope in which the mute configuration is being created. E.g., + If a filter contains project = X but is created under the + project = Y scope, it might not match any findings. + required: true + - name: 'createTime' + type: String + description: | + The time at which the mute config was created. This field is set by + the server and will be ignored if provided on config creation. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The most recent time at which the mute config was + updated. This field is set by the server and will be ignored if + provided on config creation or update. + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the mute config. This + field is set by the server and will be ignored if provided on + config creation or update. + output: true + - name: 'type' + type: String + description: | + The type of the mute config. 
+ required: true diff --git a/mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml new file mode 100644 index 000000000000..5ec56fd5828d --- /dev/null +++ b/mmv1/products/securitycenterv2/go_FolderNotificationConfig.yaml @@ -0,0 +1,138 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.notificationConfigs' +docs: +base_url: 'folders/{{folder}}/locations/{{location}}/notificationConfigs' +self_link: 'folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}' +create_url: 'folders/{{folder}}/locations/{{location}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_import: 'templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl' +examples: + - name: 'scc_v2_folder_notification_config_basic' + primary_resource_id: 'custom_notification_config' + vars: + folder_display_name: 'folder-name' + config_id: 'my-config' + topic_name: 'my-topic' + test_env_vars: + org_id: 'ORG_ID' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + Numerical ID of the parent folder. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + Location ID of the parent organization. If not provided, 'global' will be used as the default location. + url_param_only: true + required: false + immutable: true + default_value: "global" + - name: 'configId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `folders/{{folder}}/locations/{{location}}/notificationConfigs/{{config_id}}`. 
+ output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". + required: true + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. 
+ required: true diff --git a/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml new file mode 100644 index 000000000000..1c3263f4b062 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml @@ -0,0 +1,152 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderSccBigQueryExports' +description: | + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports' +docs: +base_url: 'folders/{{folder}}/locations/{{location}}/bigQueryExports' +self_link: 'folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +create_url: 'folders/{{folder}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_folder_big_query_export_config_basic' + primary_resource_id: 'custom_big_query_export_config' + vars: + big_query_export_id: 'my-export' + dataset: 'my-dataset' + dataset_id: 'my_dataset_id' + name: 'my-export' + folder_display_name: 'folder-name' + test_env_vars: + org_id: 'ORG_ID' + project: 'PROJECT_NAME' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'folder' + type: String + description: | + The folder where Cloud Security Command Center Big Query Export + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'bigQueryExportId' + type: String + description: | + This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The BigQuery export configuration is stored in this location. If not provided, Use global as default. 
+ url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this export, in the format + `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'dataset' + type: String + description: | + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + - name: 'createTime' + type: String + description: | + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: String + description: | + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. 
+ output: true + - name: 'principal' + type: String + description: | + The service account that needs permission to create table and upload data to the BigQuery dataset. + output: true + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml b/mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml new file mode 100644 index 000000000000..6449a6e49b9a --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationMuteConfig.yaml @@ -0,0 +1,113 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationMuteConfig' +description: | + Mute Findings is a volume management feature in Security Command Center + that lets you manually or programmatically hide irrelevant findings, + and create filters to automatically silence existing and future + findings based on criteria you specify. +references: + guides: + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.muteConfigs' +docs: +base_url: 'organizations/{{organization}}/locations/{{location}}/muteConfigs' +self_link: 'organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +create_url: 'organizations/{{organization}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_organization_mute_config_basic' + primary_resource_id: 'default' + vars: + mute_config_id: 'my-config' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Mute + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by organization. If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" + - name: 'mute_config_id' + type: String + description: | + Unique identifier provided by the client within the parent scope. 
+ url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the mute config. Its format is + organizations/{organization}/locations/global/muteConfigs/{configId}, + folders/{folder}/locations/global/muteConfigs/{configId}, + or projects/{project}/locations/global/muteConfigs/{configId} + output: true + - name: 'description' + type: String + description: A description of the mute config. + - name: 'filter' + type: String + description: | + An expression that defines the filter to apply across create/update + events of findings. While creating a filter string, be mindful of + the scope in which the mute configuration is being created. E.g., + If a filter contains project = X but is created under the + project = Y scope, it might not match any findings. + required: true + - name: 'createTime' + type: String + description: | + The time at which the mute config was created. This field is set by + the server and will be ignored if provided on config creation. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The most recent time at which the mute config was + updated. This field is set by the server and will be ignored if + provided on config creation or update. + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the mute config. This + field is set by the server and will be ignored if provided on + config creation or update. + output: true + - name: 'type' + type: String + description: | + The type of the mute config. + required: true diff --git a/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml new file mode 100644 index 000000000000..9ba89f6b3fd8 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml @@ -0,0 +1,134 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.locations.notificationConfigs' +docs: +base_url: 'organizations/{{organization}}/locations/{{location}}/notificationConfigs' +self_link: '{{name}}' +create_url: 'organizations/{{organization}}/locations/{{location}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' + custom_import: 'templates/terraform/custom_import/go/scc_v2_source_self_link_as_name_set_organization.go.tmpl' +examples: + - name: 'scc_v2_organization_notification_config_basic' + primary_resource_id: 'custom_organization_notification_config' + vars: + topic_name: 'my-topic' + config_id: 'my-config' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Notification + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'configId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by organization. If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `organizations/{{organization}}/notificationConfigs/{{config_id}}`. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). 
+ validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". + required: true + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + required: true diff --git a/mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml new file mode 100644 index 000000000000..b2f070bb4512 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationSccBigQueryExports.yaml @@ -0,0 +1,148 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSccBigQueryExports' +description: | + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.locations.bigQueryExports' +docs: +base_url: 'organizations/{{organization}}/locations/{{location}}/bigQueryExports' +self_link: 'organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +create_url: 'organizations/{{organization}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_import: 'templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl' +examples: + - name: 'scc_v2_organization_big_query_export_config_basic' + primary_resource_id: 'custom_big_query_export_config' + vars: + big_query_export_id: 'my-export' + dataset: 'my-dataset' + name: 'my-export' + test_env_vars: + org_id: 'ORG_ID' + project: 'PROJECT_NAME' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Big Query Export + Config lives in. + url_param_only: true + required: true + immutable: true + - name: 'bigQueryExportId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by organization. If not provided, Use global as default. 
+ url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this export, in the format + `organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'dataset' + type: String + description: | + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + - name: 'createTime' + type: String + description: | + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: String + description: | + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. 
+ output: true + - name: 'principal' + type: String + description: | + The service account that needs permission to create table and upload data to the BigQuery dataset. + output: true + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/products/securitycenterv2/go_OrganizationSource.yaml b/mmv1/products/securitycenterv2/go_OrganizationSource.yaml new file mode 100644 index 000000000000..e02d659237fd --- /dev/null +++ b/mmv1/products/securitycenterv2/go_OrganizationSource.yaml @@ -0,0 +1,88 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSource' +description: | + A Cloud Security Command Center's (Cloud SCC) finding source. A finding + source is an entity or a mechanism that can produce a finding. A source is + like a container of findings that come from the same scanner, logger, + monitor, etc. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.sources' +docs: +base_url: 'organizations/{{organization}}/sources' +self_link: '{{name}}' +update_verb: 'PATCH' +update_mask: true +skip_delete: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + fetch_iam_policy_verb: 'POST' + parent_resource_attribute: 'source' + base_url: 'organizations/{{organization}}/sources/{{source}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'organizations/{{organization}}/sources/{{source}}' + - '{{source}}' +custom_code: + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' + custom_import: 'templates/terraform/custom_import/go/scc_source_self_link_as_name_set_organization.go.tmpl' +examples: + - name: 'scc_source_basic' + primary_resource_id: 'custom_source' + vars: + source_display_name: 'My Source' + test_env_vars: + org_id: 'ORG_ID' + skip_test: true +parameters: + - name: 'organization' + type: String + description: | + The organization whose Cloud Security Command Center the Source + lives in. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of this source, in the format + `organizations/{{organization}}/sources/{{source}}`. 
+ output: true + - name: 'description' + type: String + description: | + The description of the source (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'displayName' + type: String + description: | + The source’s display name. A source’s display name must be unique + amongst its siblings, for example, two sources with the same parent + can't share the same display name. The display name must start and end + with a letter or digit, may contain letters, digits, spaces, hyphens, + and underscores, and can be no longer than 32 characters. + required: true + validation: + regex: '[\p{L}\p{N}]({\p{L}\p{N}_- ]{0,30}[\p{L}\p{N}])?' diff --git a/mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml b/mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml new file mode 100644 index 000000000000..2fd80176f7f0 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_ProjectMuteConfig.yaml @@ -0,0 +1,106 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectMuteConfig' +description: | + Mute Findings is a volume management feature in Security Command Center + that lets you manually or programmatically hide irrelevant findings, + and create filters to automatically silence existing and future + findings based on criteria you specify. 
+references: + guides: + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.muteConfigs' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/muteConfigs' +self_link: 'projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/muteConfigs?muteConfigId={{mute_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/muteConfigs/{{mute_config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_project_mute_config_basic' + primary_resource_id: 'default' + vars: + mute_config_id: 'my-config' + test_env_vars: + project_id: 'PROJECT_NAME' + skip_test: true +parameters: + - name: 'location' + type: String + description: | + location Id is provided by project. If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" + - name: 'mute_config_id' + type: String + description: | + Unique identifier provided by the client within the parent scope. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the mute config. Its format is + projects/{project}/locations/global/muteConfigs/{configId}, + folders/{folder}/locations/global/muteConfigs/{configId}, + or organizations/{organization}/locations/global/muteConfigs/{configId} + output: true + - name: 'description' + type: String + description: A description of the mute config. + - name: 'filter' + type: String + description: | + An expression that defines the filter to apply across create/update + events of findings. While creating a filter string, be mindful of + the scope in which the mute configuration is being created. E.g., + If a filter contains project = X but is created under the + project = Y scope, it might not match any findings. 
+ required: true + - name: 'createTime' + type: String + description: | + The time at which the mute config was created. This field is set by + the server and will be ignored if provided on config creation. + output: true + - name: 'updateTime' + type: String + description: | + Output only. The most recent time at which the mute config was + updated. This field is set by the server and will be ignored if + provided on config creation or update. + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the mute config. This + field is set by the server and will be ignored if provided on + config creation or update. + output: true + - name: 'type' + type: String + description: | + The type of the mute config. + required: true diff --git a/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml new file mode 100644 index 000000000000..62f92ffa8f3c --- /dev/null +++ b/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml @@ -0,0 +1,131 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectNotificationConfig' +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. 
A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.notificationConfigs' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/notificationConfigs' +self_link: '{{name}}' +create_url: 'projects/{{project}}/locations/{{location}}/notificationConfigs?configId={{config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/notificationConfigs/{{config_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_create: 'templates/terraform/post_create/go/set_computed_name.tmpl' + custom_import: 'templates/terraform/custom_import/go/self_link_as_name_set_project.go.tmpl' +examples: + - name: 'scc_v2_project_notification_config_basic' + primary_resource_id: 'custom_notification_config' + vars: + topic_name: 'my-topic' + config_id: 'my-config' + test_env_vars: + project: 'PROJECT_NAME' + ignore_read_extra: + - 'location' + - 'project' +parameters: + - name: 'configId' + type: String + description: | + This must be unique within the project. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + Location ID of the parent organization. Only global is supported at the moment. 
+ url_param_only: true + required: false + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this notification config, in the format + `projects/{{projectId}}/locations/{{location}}/notificationConfigs/{{config_id}}`. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). + validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'pubsubTopic' + type: String + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". + - name: 'serviceAccount' + type: String + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + output: true + - name: 'streamingConfig' + type: NestedObject + description: | + The config for triggering streaming-based notifications. + required: true + update_mask_fields: + - 'streamingConfig.filter' + properties: + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. 
+ + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + required: true diff --git a/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml new file mode 100644 index 000000000000..73c06de1c11b --- /dev/null +++ b/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml @@ -0,0 +1,143 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectSccBigQueryExports' +description: | + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/bigQueryExports' +self_link: 'projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'scc_v2_project_big_query_export_config_basic' + primary_resource_id: 'custom_big_query_export_config' + vars: + big_query_export_id: 'my-export' + dataset: 'my-dataset' + dataset_id: 'my_dataset_id' + name: 'my-export' + test_env_vars: + org_id: 'ORG_ID' + project: 'PROJECT_NAME' + ignore_read_extra: + - 'project' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'bigQueryExportId' + type: String + description: | + This must be unique within the organization. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + location Id is provided by organization. If not provided, Use global as default. + url_param_only: true + immutable: true + default_value: "global" +properties: + - name: 'name' + type: String + description: | + The resource name of this export, in the format + `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + output: true + - name: 'description' + type: String + description: | + The description of the notification config (max of 1024 characters). 
+ validation: + function: 'validation.StringLenBetween(0, 1024)' + - name: 'dataset' + type: String + description: | + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + - name: 'createTime' + type: String + description: | + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: String + description: | + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'mostRecentEditor' + type: String + description: | + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + output: true + - name: 'principal' + type: String + description: | + The service account that needs permission to create table and upload data to the BigQuery dataset. + output: true + - name: 'filter' + type: String + description: | + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. 
The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/products/securitycenterv2/go_product.yaml b/mmv1/products/securitycenterv2/go_product.yaml new file mode 100644 index 000000000000..b01078c6e2c9 --- /dev/null +++ b/mmv1/products/securitycenterv2/go_product.yaml @@ -0,0 +1,23 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SecurityCenterV2' +legacy_name: 'scc_v2' +display_name: 'Security Command Center (SCC)v2 API' +versions: + - name: 'ga' + base_url: 'https://securitycenter.googleapis.com/v2/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/vertexai/go_Dataset.yaml b/mmv1/products/vertexai/go_Dataset.yaml index ead71c55abb7..c6c09f09bbd8 100644 --- a/mmv1/products/vertexai/go_Dataset.yaml +++ b/mmv1/products/vertexai/go_Dataset.yaml @@ -82,7 +82,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Workflow. - immutable: false - name: 'encryptionSpec' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_Endpoint.yaml b/mmv1/products/vertexai/go_Endpoint.yaml index 21d6def117b1..12c85f5c593b 100644 --- a/mmv1/products/vertexai/go_Endpoint.yaml +++ b/mmv1/products/vertexai/go_Endpoint.yaml @@ -373,7 +373,6 @@ properties: can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. - immutable: false - name: 'createTime' type: String description: Output only. Timestamp when this Endpoint was created. diff --git a/mmv1/products/vertexai/go_FeatureGroup.yaml b/mmv1/products/vertexai/go_FeatureGroup.yaml index 976e31428985..241a5d566c46 100644 --- a/mmv1/products/vertexai/go_FeatureGroup.yaml +++ b/mmv1/products/vertexai/go_FeatureGroup.yaml @@ -78,7 +78,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your FeatureGroup. - immutable: false - name: 'description' type: String description: The description of the FeatureGroup. 
diff --git a/mmv1/products/vertexai/go_FeatureGroupFeature.yaml b/mmv1/products/vertexai/go_FeatureGroupFeature.yaml index 40905b1e27c3..d41e7d49fafb 100644 --- a/mmv1/products/vertexai/go_FeatureGroupFeature.yaml +++ b/mmv1/products/vertexai/go_FeatureGroupFeature.yaml @@ -90,7 +90,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your FeatureGroup. - immutable: false - name: 'description' type: String description: The description of the FeatureGroup. diff --git a/mmv1/products/vertexai/go_FeatureOnlineStore.yaml b/mmv1/products/vertexai/go_FeatureOnlineStore.yaml index 607c366c9419..9838eac120ae 100644 --- a/mmv1/products/vertexai/go_FeatureOnlineStore.yaml +++ b/mmv1/products/vertexai/go_FeatureOnlineStore.yaml @@ -64,6 +64,7 @@ examples: name: 'example_feature_online_store_beta_bigtable' ignore_read_extra: - 'force_destroy' + skip_vcr: true virtual_fields: - name: 'force_destroy' description: @@ -102,7 +103,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your feature online stores. - immutable: false - name: 'state' type: String description: The state of the Feature Online Store. See the possible states in [this link](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.featureOnlineStores#state). diff --git a/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml b/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml index 708d52d55ffa..6c87c539f517 100644 --- a/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml +++ b/mmv1/products/vertexai/go_FeatureOnlineStoreFeatureview.yaml @@ -70,6 +70,7 @@ examples: min_version: 'beta' vars: name: 'example_feature_view_vector_search' + skip_vcr: true parameters: - name: 'featureOnlineStore' type: String @@ -106,7 +107,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this FeatureView. 
- immutable: false - name: 'syncConfig' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_Featurestore.yaml b/mmv1/products/vertexai/go_Featurestore.yaml index 1b7761c299fb..943750f30f24 100644 --- a/mmv1/products/vertexai/go_Featurestore.yaml +++ b/mmv1/products/vertexai/go_Featurestore.yaml @@ -140,7 +140,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this Featurestore. - immutable: false - name: 'onlineServingConfig' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml b/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml index 64d75b1c724d..6099e1f17af2 100644 --- a/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml +++ b/mmv1/products/vertexai/go_FeaturestoreEntitytype.yaml @@ -128,7 +128,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to this EntityType. - immutable: false - name: 'monitoringConfig' type: NestedObject description: | diff --git a/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml b/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml index 6e905f971af4..54ff7b3d9d87 100644 --- a/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml +++ b/mmv1/products/vertexai/go_FeaturestoreEntitytypeFeature.yaml @@ -105,7 +105,6 @@ properties: type: KeyValueLabels description: | A set of key/value label pairs to assign to the feature. - immutable: false - name: 'description' type: String description: Description of the feature. diff --git a/mmv1/products/vertexai/go_Index.yaml b/mmv1/products/vertexai/go_Index.yaml index 87eec4dd6cd7..af0331caf14e 100644 --- a/mmv1/products/vertexai/go_Index.yaml +++ b/mmv1/products/vertexai/go_Index.yaml @@ -223,7 +223,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your Indexes. 
- immutable: false - name: 'createTime' type: String description: diff --git a/mmv1/products/vertexai/go_IndexEndpoint.yaml b/mmv1/products/vertexai/go_IndexEndpoint.yaml index 8aa6b8c557fe..dedbabb025b2 100644 --- a/mmv1/products/vertexai/go_IndexEndpoint.yaml +++ b/mmv1/products/vertexai/go_IndexEndpoint.yaml @@ -94,7 +94,6 @@ properties: - name: 'labels' type: KeyValueLabels description: The labels with user-defined metadata to organize your Indexes. - immutable: false - name: 'createTime' type: String description: The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. diff --git a/mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml b/mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml new file mode 100644 index 000000000000..4708d17edef2 --- /dev/null +++ b/mmv1/products/vertexai/go_IndexEndpointDeployedIndex.yaml @@ -0,0 +1,297 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'IndexEndpointDeployedIndex' +description: |- + An endpoint indexes are deployed into. An index endpoint can have multiple deployed indexes. 
+references: + guides: + api: 'https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#DeployedIndex' +docs: +id_format: '{{index_endpoint}}/deployedIndex/{{deployed_index_id}}' +base_url: '{{index_endpoint}}' +self_link: '{{index_endpoint}}' +create_url: '{{index_endpoint}}:deployIndex' +update_url: '{{index_endpoint}}:mutateDeployedIndex' +update_verb: 'POST' +delete_url: '{{index_endpoint}}:undeployIndex' +delete_verb: 'POST' +import_format: + - 'projects/{{project}}/locations/{{region}}/indexEndpoints/{{index_endpoint}}/deployedIndex/{{deployed_index_id}}' +timeouts: + insert_minutes: 45 + update_minutes: 45 + delete_minutes: 20 +async: + actions: ['create', 'update', 'delete'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 45 + update_minutes: 45 + delete_minutes: 20 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' + include_project: true +custom_code: + encoder: 'templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + decoder: 'templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl' +examples: + - name: 'vertex_ai_index_endpoint_deployed_index_basic' + primary_resource_id: 'basic_deployed_index' + vars: + endpoint_name: 'endpoint-name' + network_name: 'network-name' + deployed_index_id: 'deployed_index_id' + display_name: 'vertex-deployed-index' + display_name_index: 'test-index' + bucket_name: 'bucket-name' + service_account_id: 'vertex-sa' + address_name: 'vertex-ai-range' + test_vars_overrides: + 'network_name': 
'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1")' + 'address_name': 'acctest.BootstrapSharedTestGlobalAddress(t, "vpc-network-1", acctest.AddressWithPrefixLength(8))' + - name: 'vertex_ai_index_endpoint_deployed_index_basic_two' + primary_resource_id: 'basic_deployed_index' + vars: + endpoint_name: 'endpoint-name' + network_name: 'network-name' + deployed_index_id: 'deployed_index_id' + display_name: 'vertex-deployed-index' + display_name_index: 'test-index' + bucket_name: 'bucket-name' + service_account_id: 'vertex-sa' + address_name: 'vertex-ai-range' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1")' + 'address_name': 'acctest.BootstrapSharedTestGlobalAddress(t, "vpc-network-1", acctest.AddressWithPrefixLength(8))' + - name: 'vertex_ai_index_endpoint_deployed_index_dedicated_resources' + primary_resource_id: 'dedicated_resources' + vars: + display_name_index: 'test-index' + bucket_name: 'bucket-name' + display_name: 'vertex-deployed-index' + deployed_index_id: 'deployed_index_id' + skip_docs: true + - name: 'vertex_ai_index_endpoint_deployed_index_automatic_resources' + primary_resource_id: 'automatic_resources' + vars: + display_name_index: 'test-index' + bucket_name: 'bucket-name' + display_name: 'vertex-deployed-index' + deployed_index_id: 'deployed_index_id' + skip_docs: true +parameters: + - name: 'indexEndpoint' + type: ResourceRef + description: | + Identifies the index endpoint. Must be in the format + 'projects/{{project}}/locations/{{region}}/indexEndpoints/{{indexEndpoint}}' + url_param_only: true + required: true + immutable: true + resource: 'IndexEndpoint' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + The name of the DeployedIndex resource. + output: true + - name: 'deployedIndexId' + type: String + description: The user specified ID of the DeployedIndex. 
The ID can be up to 128 characters long and must start with a letter and only contain letters, numbers, and underscores. The ID must be unique within the project it is created in. + required: true + immutable: true + - name: 'index' + type: String + description: The name of the Index this is the deployment of. + required: true + immutable: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - name: 'displayName' + type: String + description: The display name of the Index. The name can be up to 128 characters long and can consist of any UTF-8 characters. + immutable: true + - name: 'createTime' + type: String + description: The timestamp of when the Index was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + output: true + - name: 'privateEndpoints' + type: NestedObject + description: Provides paths for users to send requests directly to the deployed index services running on Cloud via private services access. This field is populated if [network](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#IndexEndpoint.FIELDS.network) is configured. + output: true + properties: + - name: 'matchGrpcAddress' + type: String + description: The ip address used to send match gRPC requests. + output: true + - name: 'serviceAttachment' + type: String + description: The name of the service attachment resource. Populated if private service connect is enabled. + output: true + - name: 'pscAutomatedEndpoints' + type: Array + description: | + PscAutomatedEndpoints is populated if private service connect is enabled if PscAutomatedConfig is set. + output: true + item_type: + type: NestedObject + properties: + - name: 'projectId' + type: String + description: | + Corresponding projectId in pscAutomationConfigs + output: true + - name: 'network' + type: String + description: | + Corresponding network in pscAutomationConfigs. 
+ output: true + - name: 'matchAddress' + type: String + description: | + ip Address created by the automated forwarding rule. + output: true + - name: 'indexSyncTime' + type: String + description: | + The DeployedIndex may depend on various data on its original Index. Additionally when certain changes to the original Index are being done (e.g. when what the Index contains is being changed) the DeployedIndex may be asynchronously updated in the background to reflect these changes. If this timestamp's value is at least the [Index.update_time](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexes#Index.FIELDS.update_time) of the original Index, it means that this DeployedIndex and the original Index are in sync. If this timestamp is older, then to see which updates this DeployedIndex already contains (and which it does not), one must [list](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.operations/list#google.longrunning.Operations.ListOperations) the operations that are running on the original Index. Only the successfully completed Operations with updateTime equal or before this sync time are contained in this DeployedIndex. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'automaticResources' + type: NestedObject + description: | + A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. + + # Note: Having the fields within automaticResouces not being marked as immutable was done in order to support the ability to update such fields. 
See : https://github.com/GoogleCloudPlatform/magic-modules/pull/11039#issuecomment-2209316648 + default_from_api: true + properties: + - name: 'minReplicaCount' + type: Integer + description: | + The minimum number of replicas this DeployedModel will be always deployed on. If minReplicaCount is not set, the default value is 2 (we don't provide SLA when minReplicaCount=1). + + If traffic against it increases, it may dynamically be deployed onto more replicas up to [maxReplicaCount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/AutomaticResources#FIELDS.max_replica_count), and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + default_from_api: true + - name: 'maxReplicaCount' + type: Integer + description: | + The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + + The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic will be assumed, though Vertex AI may be unable to scale beyond a certain replica number. + default_from_api: true
We don't provide SLA when minReplicaCount=1. If maxReplicaCount is not set, the default value is minReplicaCount. The max allowed replica count is 1000. + + Available machine types for SMALL shard: e2-standard-2 and all machine types available for MEDIUM and LARGE shard. + + Available machine types for MEDIUM shard: e2-standard-16 and all machine types available for LARGE shard. + + Available machine types for LARGE shard: e2-highmem-16, n2d-standard-32. + + n1-standard-16 and n1-standard-32 are still available, but we recommend e2-standard-16 and e2-highmem-16 for cost efficiency. + + # Having fields within dedicatedResources not being marked as immutable as well as removing + # fields such as acceleratorType, acceleratorCount, tpuTopology was done in order to support the ability to update such fields. This is discussed extensively [here](https://github.com/GoogleCloudPlatform/magic-modules/pull/11039#issuecomment-2209316648). + properties: + - name: 'machineSpec' + type: NestedObject + description: The minimum number of replicas this DeployedModel will be always deployed on. + # This field (and its nested fields) is not updatable via the mutateDeployedIndex method in the API, which is the only update method usable by this 'fine-grained' resource. This is why the field is marked as immutable despite the API docs not marking the field as immutable. + required: true + immutable: true + properties: + - name: 'machineType' + type: String + description: | + The type of the machine. + + See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) + + See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). + + For [DeployedModel](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints#DeployedModel) this field is optional, and the default value is n1-standard-2. 
For [BatchPredictionJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchPredictionJob) or as part of [WorkerPoolSpec](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#WorkerPoolSpec) this field is required. + immutable: true + - name: 'minReplicaCount' + type: Integer + description: The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. + required: true + - name: 'maxReplicaCount' + type: Integer + description: The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If maxReplicaCount is not set, the default value is minReplicaCount + default_from_api: true + - name: 'enableAccessLogging' + type: Boolean + description: If true, private endpoint's access logs are sent to Cloud Logging. + # This field is not updatable via the mutateDeployedIndex method in the API, which is the only update method usable by this 'fine-grained' resource. This is why the field is marked as immutable despite the API docs not marking the field as immutable. + immutable: true + default_value: false + - name: 'deployedIndexAuthConfig' + type: NestedObject + description: If set, the authentication is enabled for the private endpoint. + # This field (and its nested fields) is not updatable via the mutateDeployedIndex method in the API, which is the only update method usable by this 'fine-grained' resource. This is why the field is marked as immutable despite the API docs not marking the field as immutable. + immutable: true + properties: + - name: 'authProvider' + type: NestedObject + description: Defines the authentication provider that the DeployedIndex uses. + properties: + - name: 'audiences' + type: Array + description: The list of JWT audiences. that are allowed to access. A JWT containing any of these audiences will be accepted. 
+ immutable: true + item_type: + type: String + - name: 'allowedIssuers' + type: Array + description: | + A list of allowed JWT issuers. Each entry must be a valid Google service account, in the following format: service-account-name@project-id.iam.gserviceaccount.com + immutable: true + item_type: + type: String + - name: 'reservedIpRanges' + type: Array + description: | + A list of reserved ip ranges under the VPC network that can be used for this DeployedIndex. + If set, we will deploy the index within the provided ip ranges. Otherwise, the index might be deployed to any ip ranges under the provided VPC network. + + The value should be the name of the address (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) Example: ['vertex-ai-ip-range']. + + For more information about subnets and network IP ranges, please see https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. + immutable: true + item_type: + type: String + - name: 'deploymentGroup' + type: String + description: | + The deployment group can be no longer than 64 characters (eg: 'test', 'prod'). If not set, we will use the 'default' deployment group. + Creating deployment_groups with reserved_ip_ranges is a recommended practice when the peered network has multiple peering ranges. This creates your deployments from predictable IP spaces for easier traffic administration. Also, one deployment_group (except 'default') can only be used with the same reserved_ip_ranges which means if the deployment_group has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or [d, e] is disallowed. [See the official documentation here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpoints#DeployedIndex.FIELDS.deployment_group). + Note: we only support up to 5 deployment groups (not including 'default'). 
+ immutable: true + default_value: "default" diff --git a/mmv1/products/vertexai/go_Tensorboard.yaml b/mmv1/products/vertexai/go_Tensorboard.yaml index d4a99e5190bc..ba54d07256e7 100644 --- a/mmv1/products/vertexai/go_Tensorboard.yaml +++ b/mmv1/products/vertexai/go_Tensorboard.yaml @@ -116,4 +116,3 @@ properties: type: KeyValueLabels description: | The labels with user-defined metadata to organize your Tensorboards. - immutable: false diff --git a/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml b/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml index b6fd372d0737..85a298620059 100644 --- a/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml +++ b/mmv1/products/vmwareengine/go_ExternalAccessRule.yaml @@ -47,8 +47,8 @@ async: path: 'error' message: 'message' include_project: true -skip_sweeper: true custom_code: +skip_sweeper: true examples: - name: 'vmware_engine_external_access_rule_basic' primary_resource_id: 'vmw-engine-external-access-rule' diff --git a/mmv1/products/vmwareengine/go_ExternalAddress.yaml b/mmv1/products/vmwareengine/go_ExternalAddress.yaml index c853ea1348e6..2b8395be5be4 100644 --- a/mmv1/products/vmwareengine/go_ExternalAddress.yaml +++ b/mmv1/products/vmwareengine/go_ExternalAddress.yaml @@ -51,9 +51,10 @@ async: path: 'error' message: 'message' include_project: true -skip_sweeper: true custom_code: +skip_sweeper: true error_retry_predicates: + - 'transport_tpg.ExternalIpServiceNotActive' examples: - name: 'vmware_engine_external_address_basic' diff --git a/mmv1/products/vmwareengine/go_PrivateCloud.yaml b/mmv1/products/vmwareengine/go_PrivateCloud.yaml index 95ad9731938c..1c52a4c13760 100644 --- a/mmv1/products/vmwareengine/go_PrivateCloud.yaml +++ b/mmv1/products/vmwareengine/go_PrivateCloud.yaml @@ -52,6 +52,7 @@ custom_code: constants: 'templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl' update_encoder: 'templates/terraform/update_encoder/go/private_cloud.go.tmpl' decoder: 
'templates/terraform/decoders/go/private_cloud.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl' post_delete: 'templates/terraform/post_delete/go/private_cloud.go.tmpl' post_update: 'templates/terraform/post_update/go/private_cloud.go.tmpl' pre_delete: 'templates/terraform/pre_delete/go/vmwareengine_private_cloud.go.tmpl' diff --git a/mmv1/products/vpcaccess/go_Connector.yaml b/mmv1/products/vpcaccess/go_Connector.yaml index f79e6156459d..fded7434edb2 100644 --- a/mmv1/products/vpcaccess/go_Connector.yaml +++ b/mmv1/products/vpcaccess/go_Connector.yaml @@ -115,21 +115,27 @@ properties: Minimum throughput of the connector in Mbps. Default and min is 200. Refers to the expected throughput when using an e2-micro machine type. Value must be a multiple of 100 from 200 through 900. Must be lower than the value specified by max_throughput. If both min_throughput and min_instances are provided, min_instances takes precedence over min_throughput. The use of min_throughput is discouraged in favor of min_instances. + default_from_api: true + conflicts: + - min_instances validation: function: 'validation.IntBetween(200, 1000)' - default_value: 200 - name: 'minInstances' type: Integer description: | Minimum value of instances in autoscaling group underlying the connector. Value must be between 2 and 9, inclusive. Must be lower than the value specified by max_instances. default_from_api: true + conflicts: + - min_throughput - name: 'maxInstances' type: Integer description: | Maximum value of instances in autoscaling group underlying the connector. Value must be between 3 and 10, inclusive. Must be higher than the value specified by min_instances. default_from_api: true + conflicts: + - max_throughput - name: 'maxThroughput' type: Integer description: | @@ -137,9 +143,11 @@ properties: when using an e2-micro machine type. Value must be a multiple of 100 from 300 through 1000. Must be higher than the value specified by min_throughput. 
If both max_throughput and max_instances are provided, max_instances takes precedence over max_throughput. The use of max_throughput is discouraged in favor of max_instances. + default_from_api: true + conflicts: + - max_instances validation: function: 'validation.IntBetween(200, 1000)' - default_value: 300 - name: 'selfLink' type: String description: | diff --git a/mmv1/products/workflows/go_Workflow.yaml b/mmv1/products/workflows/go_Workflow.yaml new file mode 100644 index 000000000000..dc534c04bd15 --- /dev/null +++ b/mmv1/products/workflows/go_Workflow.yaml @@ -0,0 +1,141 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workflow' +description: | + Workflow program to be executed by Workflows. +references: + guides: + 'Managing Workflows': 'https://cloud.google.com/workflows/docs/creating-updating-workflow' + api: 'https://cloud.google.com/workflows/docs/reference/rest/v1/projects.locations.workflows' +docs: + optional_properties: | + * `name_prefix` - (Optional) Creates a unique name beginning with the + specified prefix. If this and name are unspecified, a random value is chosen for the name. 
+id_format: 'projects/{{project}}/locations/{{region}}/workflows/{{name}}' +base_url: 'projects/{{project}}/locations/{{region}}/workflows' +self_link: 'projects/{{project}}/locations/{{region}}/workflows/{{name}}' +create_url: 'projects/{{project}}/locations/{{region}}/workflows?workflowId={{name}}' +update_verb: 'PATCH' +update_mask: true +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/workflow.tmpl' + encoder: 'templates/terraform/encoders/go/workflow.go.tmpl' +schema_version: 1 +state_upgraders: true +examples: + - name: 'workflow_basic' + primary_resource_id: 'example' + vars: + name: 'workflow' + account_id: 'my-account' + skip_import_test: true +parameters: + - name: 'region' + type: String + description: The region of the workflow. + url_param_only: true + immutable: true +properties: + - name: 'name' + type: String + description: Name of the Workflow. + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'description' + type: String + description: | + Description of the workflow provided by the user. Must be at most 1000 unicode characters long. + default_from_api: true + - name: 'createTime' + type: String + description: | + The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + output: true + - name: 'updateTime' + type: String + description: | + The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+ output: true + - name: 'state' + type: String + description: State of the workflow deployment. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + A set of key/value label pairs to assign to this Workflow. + - name: 'serviceAccount' + type: String + description: | + Name of the service account associated with the latest workflow version. This service + account represents the identity of the workflow and determines what permissions the workflow has. + Format: projects/{project}/serviceAccounts/{account} or {account}. + Using - as a wildcard for the {project} or not providing one at all will infer the project from the account. + The {account} value can be the email address or the unique_id of the service account. + If not provided, workflow will use the project's default service account. + Modifying this field for an existing workflow results in a new workflow revision. + default_from_api: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'sourceContents' + type: String + description: | + Workflow code to be executed. The size limit is 128KB. + - name: 'revisionId' + type: String + description: | + The revision of the workflow. A new one is generated if the service account or source contents is changed. + output: true + - name: 'cryptoKeyName' + type: String + description: | + The KMS key used to encrypt workflow and execution data. + + Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} + - name: 'callLogLevel' + type: Enum + description: | + Describes the level of platform logging to apply to calls and call responses during + executions of this workflow. If both the workflow and the execution specify a logging level, + the execution level takes precedence. 
+    enum_values:
+      - 'CALL_LOG_LEVEL_UNSPECIFIED'
+      - 'LOG_ALL_CALLS'
+      - 'LOG_ERRORS_ONLY'
+      - 'LOG_NONE'
+  - name: 'userEnvVars'
+    type: KeyValuePairs
+    description: |
+      User-defined environment variables associated with this workflow revision. This map has a maximum length of 20. Each string can take up to 4KiB. Keys cannot be empty strings and cannot start with "GOOGLE" or "WORKFLOWS".
diff --git a/mmv1/products/workflows/go_product.yaml b/mmv1/products/workflows/go_product.yaml
new file mode 100644
index 000000000000..a8bd13a3689f
--- /dev/null
+++ b/mmv1/products/workflows/go_product.yaml
@@ -0,0 +1,36 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workflows' +display_name: 'Workflows' +versions: + - name: 'beta' + base_url: 'https://workflows.googleapis.com/v1/' + - name: 'ga' + base_url: 'https://workflows.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' +async: + type: "OpAsync" + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' diff --git a/mmv1/products/workstations/go_Workstation.yaml b/mmv1/products/workstations/go_Workstation.yaml new file mode 100644 index 000000000000..0ee998625140 --- /dev/null +++ b/mmv1/products/workstations/go_Workstation.yaml @@ -0,0 +1,162 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workstation' +description: + 'A single instance of a developer workstation with its own persistent storage.' 
+min_version: 'beta' +references: + guides: + 'Workstations': 'https://cloud.google.com/workstations/docs/' + api: 'https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs.workstations' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations' +self_link: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations/{{workstation_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations?workstationId={{workstation_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations/{{workstation_id}}' +timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'workstation_id' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}/workstations/{{workstation_id}}' + - '{{workstation_id}}' + min_version: 'beta' +custom_code: +examples: + - name: 'workstation_basic' + primary_resource_id: 'default' + primary_resource_name: 
'fmt.Sprintf("tf-test-workstation-cluster%s", context["random_suffix"]), fmt.Sprintf("tf-test-workstation-config%s", context["random_suffix"]), fmt.Sprintf("tf-test-work-station%s", context["random_suffix"])' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + workstation_name: 'work-station' +parameters: + - name: 'workstationId' + type: String + description: | + ID to use for the workstation. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'workstationConfigId' + type: String + description: | + The ID of the parent workstation cluster config. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'workstationClusterId' + type: String + description: | + The ID of the parent workstation cluster. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the workstation parent resources reside. + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Full name of this resource. + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: | + A system-assigned unique identified for this resource. + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + Human-readable name for this resource. + min_version: 'beta' + - name: 'labels' + type: KeyValueLabels + description: + 'Client-specified labels that are applied to the resource and that are + also propagated to the underlying Compute Engine resources.' + min_version: 'beta' + - name: 'annotations' + type: KeyValueAnnotations + description: 'Client-specified annotations. This is distinct from labels.' 
+ min_version: 'beta' + - name: 'env' + type: KeyValuePairs + description: | + 'Client-specified environment variables passed to the workstation container's entrypoint.' + min_version: 'beta' + - name: 'createTime' + type: Time + description: | + Time when this resource was created. + min_version: 'beta' + output: true + - name: 'host' + type: String + description: | + Host to which clients can send HTTPS traffic that will be received by the workstation. + Authorized traffic will be received to the workstation as HTTP on port 80. + To send traffic to a different port, clients may prefix the host with the destination port in the format "{port}-{host}". + min_version: 'beta' + output: true + - name: 'state' + type: Enum + description: | + Current state of the workstation. + min_version: 'beta' + output: true + enum_values: + - 'STATE_STARTING' + - 'STATE_RUNNING' + - 'STATE_STOPPING' + - 'STATE_STOPPED' diff --git a/mmv1/products/workstations/go_WorkstationCluster.yaml b/mmv1/products/workstations/go_WorkstationCluster.yaml new file mode 100644 index 000000000000..e602afbc048e --- /dev/null +++ b/mmv1/products/workstations/go_WorkstationCluster.yaml @@ -0,0 +1,237 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'WorkstationCluster' +description: "A grouping of workstation configurations and the associated workstations in that region." 
+min_version: 'beta' +references: + guides: + 'Workstations': 'https://cloud.google.com/workstations/docs/' + api: 'https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters/create' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/workstationClusters' +self_link: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/workstationClusters?workstationClusterId={{workstation_cluster_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}' +timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'workstation_cluster_basic' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + - name: 'workstation_cluster_private' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster-private' + - name: 'workstation_cluster_custom_domain' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster-custom-domain' +parameters: + - name: 'workstationClusterId' + type: String + description: | + ID to use for the workstation cluster. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the workstation cluster should reside. 
+ min_version: 'beta' + url_param_only: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The name of the cluster resource. + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: | + The system-generated UID of the resource. + min_version: 'beta' + output: true + - name: 'labels' + type: KeyValueLabels + description: + "Client-specified labels that are applied to the resource and that are + also propagated to the underlying Compute Engine resources." + min_version: 'beta' + - name: 'network' + type: String + description: | + The relative resource name of the VPC network on which the instance can be accessed. + It is specified in the following form: "projects/{projectNumber}/global/networks/{network_id}". + min_version: 'beta' + required: true + immutable: true + - name: 'subnetwork' + type: String + description: | + Name of the Compute Engine subnetwork in which instances associated with this cluster will be created. + Must be part of the subnetwork specified for this cluster. + min_version: 'beta' + required: true + immutable: true + - name: 'controlPlaneIp' + type: String + description: | + The private IP address of the control plane for this workstation cluster. + Workstation VMs need access to this IP address to work with the service, so make sure that your firewall rules allow egress from the workstation VMs to this address. + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + Human-readable name for this resource. + min_version: 'beta' + - name: 'degraded' + type: Boolean + description: | + Whether this resource is in degraded mode, in which case it may require user action to restore full functionality. + Details can be found in the conditions field. + min_version: 'beta' + output: true + - name: 'annotations' + type: KeyValueAnnotations + description: "Client-specified annotations. This is distinct from labels." 
+ min_version: 'beta' + - name: 'etag' + type: Fingerprint + description: | + Checksum computed by the server. + May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + min_version: 'beta' + output: true + - name: 'createTime' + type: Time + description: | + Time when this resource was created. + min_version: 'beta' + output: true + - name: 'privateClusterConfig' + type: NestedObject + description: | + Configuration for private cluster. + min_version: 'beta' + properties: + - name: 'enablePrivateEndpoint' + type: Boolean + description: | + Whether Workstations endpoint is private. + min_version: 'beta' + required: true + immutable: true + - name: 'clusterHostname' + type: String + description: | + Hostname for the workstation cluster. + This field will be populated only when private endpoint is enabled. + To access workstations in the cluster, create a new DNS zone mapping this domain name to an internal IP address and a forwarding rule mapping that address to the service attachment. + min_version: 'beta' + output: true + - name: 'serviceAttachmentUri' + type: String + description: | + Service attachment URI for the workstation cluster. + The service attachment is created when private endpoint is enabled. + To access workstations in the cluster, configure access to the managed service using (Private Service Connect)[https://cloud.google.com/vpc/docs/configure-private-service-connect-services]. + min_version: 'beta' + output: true + - name: 'allowedProjects' + type: Array + description: | + Additional project IDs that are allowed to attach to the workstation cluster's service attachment. + By default, the workstation cluster's project and the VPC host project (if different) are allowed. + min_version: 'beta' + default_from_api: true + item_type: + type: String + - name: 'domainConfig' + type: NestedObject + description: | + Configuration options for a custom domain. 
+ min_version: 'beta' + properties: + - name: 'domain' + type: String + description: | + Domain used by Workstations for HTTP ingress. + + min_version: 'beta' + required: true + immutable: true + - name: 'conditions' + type: Array + description: |- + Status conditions describing the current resource state. + min_version: 'beta' + output: true + item_type: + type: NestedObject + properties: + - name: 'code' + type: Integer + description: |- + The status code, which should be an enum value of google.rpc.Code. + min_version: 'beta' + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + min_version: 'beta' + output: true + - name: 'details' + type: Array + description: | + A list of messages that carry the error details. + min_version: 'beta' + output: true + item_type: + type: KeyValuePairs diff --git a/mmv1/products/workstations/go_WorkstationConfig.yaml b/mmv1/products/workstations/go_WorkstationConfig.yaml new file mode 100644 index 000000000000..05372e367a3f --- /dev/null +++ b/mmv1/products/workstations/go_WorkstationConfig.yaml @@ -0,0 +1,680 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'WorkstationConfig' +description: + 'A set of configuration options describing how a workstation will be run. 
+ Workstation configurations are intended to be shared across multiple + workstations.' +min_version: 'beta' +references: + guides: + 'Workstations': 'https://cloud.google.com/workstations/docs/' + api: 'https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs/create' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs' +self_link: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs?workstationConfigId={{workstation_config_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}' +timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'workstation_config_id' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/workstationClusters/{{workstation_cluster_id}}/workstationConfigs/{{workstation_config_id}}' + - '{{workstation_config_id}}' + min_version: 'beta' +custom_code: +examples: + - name: 'workstation_config_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-workstation-cluster%s", context["random_suffix"]), 
fmt.Sprintf("tf-test-workstation-config%s", context["random_suffix"])' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + key_short_name: 'keyname' + value_short_name: 'valuename' + org_id: '123456789' + test_vars_overrides: + 'key_short_name': '"tf-test-key-" + acctest.RandString(t, 10)' + 'value_short_name': '"tf-test-value-" + acctest.RandString(t, 10)' + 'org_id': 'envvar.GetTestOrgFromEnv(t)' + - name: 'workstation_config_container' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_persistent_directories' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_source_snapshot' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_shielded_instance_config' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_accelerators' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_boost' + primary_resource_id: 'default' + min_version: 'beta' + vars: + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' + - name: 'workstation_config_encryption_key' + primary_resource_id: 'default' + min_version: 'beta' + vars: + account_id: 'my-account' + workstation_cluster_name: 'workstation-cluster' + workstation_config_name: 'workstation-config' +parameters: + - name: 
'workstationConfigId' + type: String + description: | + The ID to be assigned to the workstation cluster config. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'workstationClusterId' + type: String + description: | + The ID of the parent workstation cluster. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the workstation cluster config should reside. + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Full name of this resource. + min_version: 'beta' + output: true + - name: 'uid' + type: String + description: | + The system-generated UID of the resource. + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + Human-readable name for this resource. + min_version: 'beta' + - name: 'labels' + type: KeyValueLabels + description: + 'Client-specified labels that are applied to the resource and that are + also propagated to the underlying Compute Engine resources.' + min_version: 'beta' + - name: 'annotations' + type: KeyValueAnnotations + description: 'Client-specified annotations. This is distinct from labels.' + min_version: 'beta' + - name: 'etag' + type: Fingerprint + description: | + Checksum computed by the server. + May be sent on update and delete requests to ensure that the client has an up-to-date value before proceeding. + min_version: 'beta' + output: true + - name: 'createTime' + type: Time + description: | + Time when this resource was created. + min_version: 'beta' + output: true + - name: 'idleTimeout' + type: String + description: | + How long to wait before automatically stopping an instance that hasn't recently received any user traffic. A value of 0 indicates that this instance should never time out from idleness. Defaults to 20 minutes. 
+ A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + min_version: 'beta' + default_value: "1200s" + - name: 'runningTimeout' + type: String + description: | + How long to wait before automatically stopping a workstation after it was started. A value of 0 indicates that workstations using this configuration should never time out from running duration. Must be greater than 0 and less than 24 hours if `encryption_key` is set. Defaults to 12 hours. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + min_version: 'beta' + default_value: "43200s" + - name: 'replicaZones' + type: Array + description: | + Specifies the zones used to replicate the VM and disk resources within the region. If set, exactly two zones within the workstation cluster's region must be specified—for example, `['us-central1-a', 'us-central1-f']`. + If this field is empty, two default zones within the region are used. Immutable after the workstation configuration is created. + min_version: 'beta' + immutable: true + default_from_api: true + item_type: + type: String + - name: 'enableAuditAgent' + type: Boolean + description: | + Whether to enable Linux `auditd` logging on the workstation. When enabled, a service account must also be specified that has `logging.buckets.write` permission on the project. Operating system audit logging is distinct from Cloud Audit Logs. + min_version: 'beta' + ignore_read: true + - name: 'host' + type: NestedObject + description: | + Runtime host for a workstation. 
+ min_version: 'beta' + default_from_api: true + update_mask_fields: + - 'host.gceInstance.machineType' + - 'host.gceInstance.poolSize' + - 'host.gceInstance.tags' + - 'host.gceInstance.serviceAccountScopes' + - 'host.gceInstance.disablePublicIpAddresses' + - 'host.gceInstance.enableNestedVirtualization' + - 'host.gceInstance.shieldedInstanceConfig.enableSecureBoot' + - 'host.gceInstance.shieldedInstanceConfig.enableVtpm' + - 'host.gceInstance.shieldedInstanceConfig.enableIntegrityMonitoring' + - 'host.gceInstance.confidentialInstanceConfig.enableConfidentialCompute' + - 'host.gceInstance.accelerators' + - 'host.gceInstance.boostConfigs' + - 'host.gceInstance.disableSsh' + - 'host.gceInstance.vmTags' + properties: + - name: 'gceInstance' + type: NestedObject + description: | + A runtime using a Compute Engine instance. + min_version: 'beta' + default_from_api: true + properties: + - name: 'machineType' + type: String + description: |- + The name of a Compute Engine machine type. + min_version: 'beta' + default_from_api: true + - name: 'serviceAccount' + type: String + description: |- + Email address of the service account that will be used on VM instances used to support this config. This service account must have permission to pull the specified container image. If not set, VMs will run without a service account, in which case the image must be publicly accessible. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'serviceAccountScopes' + type: Array + description: |- + Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + min_version: 'beta' + default_from_api: true + item_type: + type: String + - name: 'poolSize' + type: Integer + description: |- + Number of instances to pool for faster workstation startup. 
+ min_version: 'beta' + default_from_api: true + - name: 'bootDiskSizeGb' + type: Integer + description: |- + Size of the boot disk in GB. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'tags' + type: Array + description: | + Network tags to add to the Compute Engine machines backing the Workstations. + min_version: 'beta' + item_type: + type: String + - name: 'disablePublicIpAddresses' + type: Boolean + description: | + Whether instances have no public IP address. + min_version: 'beta' + - name: 'disableSsh' + type: Boolean + description: | + Whether to disable SSH access to the VM. + min_version: 'beta' + send_empty_value: true + default_value: true + - name: 'enableNestedVirtualization' + type: Boolean + description: | + Whether to enable nested virtualization on the Compute Engine VMs backing the Workstations. + + See https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs#GceInstance.FIELDS.enable_nested_virtualization + min_version: 'beta' + - name: 'shieldedInstanceConfig' + type: NestedObject + description: | + A set of Compute Engine Shielded instance options. + min_version: 'beta' + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/workstations_config_shielded_instance.go.tmpl' + properties: + - name: 'enableSecureBoot' + type: Boolean + description: | + Whether the instance has Secure Boot enabled. + min_version: 'beta' + send_empty_value: true + - name: 'enableVtpm' + type: Boolean + description: | + Whether the instance has the vTPM enabled. + min_version: 'beta' + send_empty_value: true + - name: 'enableIntegrityMonitoring' + type: Boolean + description: | + Whether the instance has integrity monitoring enabled. + min_version: 'beta' + send_empty_value: true + - name: 'confidentialInstanceConfig' + type: NestedObject + description: | + A set of Compute Engine Confidential VM instance options. 
+ min_version: 'beta' + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/workstations_config_confidential_instance.go.tmpl' + properties: + - name: 'enableConfidentialCompute' + type: Boolean + description: | + Whether the instance has confidential compute enabled. + min_version: 'beta' + send_empty_value: true + - name: 'accelerators' + type: Array + description: | + An accelerator card attached to the instance. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: | + Type of accelerator resource to attach to the instance, for example, "nvidia-tesla-p100". + min_version: 'beta' + required: true + - name: 'count' + type: Integer + description: | + Number of accelerator cards exposed to the instance. + min_version: 'beta' + required: true + - name: 'boostConfigs' + type: Array + description: | + A list of the boost configurations that workstations created using this workstation configuration are allowed to use. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'id' + type: String + description: | + The id to be used for the boost config. + min_version: 'beta' + required: true + - name: 'machineType' + type: String + description: | + The type of machine that boosted VM instances will use—for example, e2-standard-4. For more information about machine types that Cloud Workstations supports, see the list of available machine types https://cloud.google.com/workstations/docs/available-machine-types. Defaults to e2-standard-4. + min_version: 'beta' + - name: 'bootDiskSizeGb' + type: Integer + description: |- + Size of the boot disk in GB. The minimum boot disk size is `30` GB. Defaults to `50` GB. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'enableNestedVirtualization' + type: Boolean + description: | + Whether to enable nested virtualization on the Compute Engine VMs backing boosted Workstations. 
+ + See https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs#GceInstance.FIELDS.enable_nested_virtualization + min_version: 'beta' + default_from_api: true + - name: 'poolSize' + type: Integer + description: |- + Number of instances to pool for faster workstation boosting. + min_version: 'beta' + default_from_api: true + - name: 'accelerators' + type: Array + description: | + An accelerator card attached to the boost instance. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: | + Type of accelerator resource to attach to the instance, for example, "nvidia-tesla-p100". + min_version: 'beta' + required: true + - name: 'count' + type: Integer + description: | + Number of accelerator cards exposed to the instance. + min_version: 'beta' + required: true + - name: 'vmTags' + type: KeyValuePairs + description: | + Resource manager tags to be bound to the VM instances backing the Workstations. + Tag keys and values have the same definition as + https://cloud.google.com/resource-manager/docs/tags/tags-overview + Keys must be in the format `tagKeys/{tag_key_id}`, and + values are in the format `tagValues/456`. + min_version: 'beta' + - name: 'persistentDirectories' + type: Array + description: | + Directories to persist across workstation sessions. + min_version: 'beta' + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'mountPath' + type: String + description: | + Location of this directory in the running workstation. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'gcePd' + type: NestedObject + description: | + A directory to persist across workstation sessions, backed by a Compute Engine regional persistent disk. Can only be updated if not empty during creation. 
+ min_version: 'beta' + default_from_api: true + properties: + - name: 'fsType' + type: String + description: | + Type of file system that the disk should be formatted with. The workstation image must support this file system type. Must be empty if `sourceSnapshot` is set. Defaults to `ext4`. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'diskType' + type: String + description: | + The type of the persistent disk for the home directory. Defaults to `pd-standard`. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'sizeGb' + type: Integer + description: |- + The GB capacity of a persistent home directory for each workstation created with this configuration. Must be empty if `sourceSnapshot` is set. + Valid values are `10`, `50`, `100`, `200`, `500`, or `1000`. Defaults to `200`. If less than `200` GB, the `diskType` must be `pd-balanced` or `pd-ssd`. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'reclaimPolicy' + type: Enum + description: | + Whether the persistent disk should be deleted when the workstation is deleted. Valid values are `DELETE` and `RETAIN`. Defaults to `DELETE`. + min_version: 'beta' + enum_values: + - 'DELETE' + - 'RETAIN' + - name: 'sourceSnapshot' + type: String + description: | + Name of the snapshot to use as the source for the disk. This can be the snapshot's `self_link`, `id`, or a string in the format of `projects/{project}/global/snapshots/{snapshot}`. If set, `sizeGb` and `fsType` must be empty. Can only be updated if it has an existing value. + # TODO(esu): Add conflicting fields once complex lists are supported. + min_version: 'beta' + - name: 'ephemeralDirectories' + type: Array + description: | + Ephemeral directories which won't persist across workstation sessions. 
+ min_version: 'beta' + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'mountPath' + type: String + description: | + Location of this directory in the running workstation. + min_version: 'beta' + default_from_api: true + - name: 'gcePd' + type: NestedObject + description: | + An EphemeralDirectory backed by a Compute Engine persistent disk. + min_version: 'beta' + default_from_api: true + properties: + - name: 'diskType' + type: String + description: | + Type of the disk to use. Defaults to `"pd-standard"`. + min_version: 'beta' + default_from_api: true + - name: 'sourceSnapshot' + type: String + description: | + Name of the snapshot to use as the source for the disk. + + Must be empty if `sourceImage` is set. + Must be empty if `read_only` is false. + Updating `source_snapshot` will update content in the ephemeral directory after the workstation is restarted. + min_version: 'beta' + - name: 'sourceImage' + type: String + description: | + Name of the disk image to use as the source for the disk. + + Must be empty `sourceSnapshot` is set. + Updating `sourceImage` will update content in the ephemeral directory after the workstation is restarted. + min_version: 'beta' + - name: 'readOnly' + type: Boolean + description: | + Whether the disk is read only. If true, the disk may be shared by multiple VMs and `sourceSnapshot` must be set. + min_version: 'beta' + - name: 'container' + type: NestedObject + description: | + Container that will be run for each workstation using this configuration when that workstation is started. + min_version: 'beta' + default_from_api: true + update_mask_fields: + - 'container.image' + - 'container.command' + - 'container.args' + - 'container.workingDir' + - 'container.env' + - 'container.runAsUser' + properties: + - name: 'image' + type: String + description: | + Docker image defining the container. This image must be accessible by the config's service account. 
+ min_version: 'beta' + default_from_api: true + - name: 'command' + type: Array + description: | + If set, overrides the default ENTRYPOINT specified by the image. + min_version: 'beta' + item_type: + type: String + - name: 'args' + type: Array + description: | + Arguments passed to the entrypoint. + min_version: 'beta' + item_type: + type: String + - name: 'workingDir' + type: String + description: | + If set, overrides the default DIR specified by the image. + # Allow unsetting to revert to container default. + min_version: 'beta' + send_empty_value: true + - name: 'env' + type: KeyValuePairs + description: | + Environment variables passed to the container. + The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE". + min_version: 'beta' + - name: 'runAsUser' + type: Integer + description: | + If set, overrides the USER specified in the image with the given uid. + min_version: 'beta' + - name: 'encryptionKey' + type: NestedObject + description: | + Encrypts resources of this workstation configuration using a customer-managed encryption key. + + If specified, the boot disk of the Compute Engine instance and the persistent disk are encrypted using this encryption key. If this field is not set, the disks are encrypted using a generated key. Customer-managed encryption keys do not protect disk metadata. + If the customer-managed encryption key is rotated, when the workstation instance is stopped, the system attempts to recreate the persistent disk with the new version of the key. Be sure to keep older versions of the key until the persistent disk is recreated. Otherwise, data on the persistent disk will be lost. + If the encryption key is revoked, the workstation session will automatically be stopped within 7 hours. + min_version: 'beta' + immutable: true + properties: + - name: 'kmsKey' + type: String + description: | + The name of the Google Cloud KMS encryption key. 
+ min_version: 'beta' + required: true + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account to use with the specified KMS key. + min_version: 'beta' + required: true + - name: 'readinessChecks' + type: Array + description: | + Readiness checks to be performed on a workstation. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'path' + type: String + description: | + Path to which the request should be sent. + min_version: 'beta' + required: true + - name: 'port' + type: Integer + description: | + Port to which the request should be sent. + min_version: 'beta' + required: true + - name: 'degraded' + type: Boolean + description: | + Whether this resource is in degraded mode, in which case it may require user action to restore full functionality. Details can be found in the conditions field. + min_version: 'beta' + output: true + - name: 'disableTcpConnections' + type: Boolean + description: | + Disables support for plain TCP connections in the workstation. By default the service supports TCP connections via a websocket relay. Setting this option to true disables that relay, which prevents the usage of services that require plain tcp connections, such as ssh. When enabled, all communication must occur over https or wss. + min_version: 'beta' + - name: 'conditions' + type: Array + description: |- + Status conditions describing the current resource state. + min_version: 'beta' + output: true + item_type: + type: NestedObject + properties: + - name: 'code' + type: Integer + description: |- + The status code, which should be an enum value of google.rpc.Code. + min_version: 'beta' + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + min_version: 'beta' + output: true + - name: 'details' + type: Array + description: | + A list of messages that carry the error details. 
+ min_version: 'beta' + output: true + item_type: + type: KeyValuePairs diff --git a/mmv1/products/workstations/go_product.yaml b/mmv1/products/workstations/go_product.yaml new file mode 100644 index 000000000000..7400feef3a9b --- /dev/null +++ b/mmv1/products/workstations/go_product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Workstations' +display_name: 'Cloud Workstations' +versions: + - name: 'beta' + base_url: 'https://workstations.googleapis.com/v1beta/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 7f7105eb38da..8824e2a9ae94 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -26,7 +26,6 @@ import ( "text/template" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api" - "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/product" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "github.com/golang/glog" ) @@ -35,7 +34,7 @@ type TemplateData struct { // include Compile::Core OutputFolder string - Version product.Version + VersionName string TerraformResourceDirectory string TerraformProviderModule string @@ -50,13 +49,13 @@ var GA_VERSION = "ga" var BETA_VERSION = "beta" var ALPHA_VERSION = "alpha" -func NewTemplateData(outputFolder string, version product.Version) *TemplateData { - 
td := TemplateData{OutputFolder: outputFolder, Version: version} +func NewTemplateData(outputFolder string, versionName string) *TemplateData { + td := TemplateData{OutputFolder: outputFolder, VersionName: versionName} - if version.Name == GA_VERSION { + if versionName == GA_VERSION { td.TerraformResourceDirectory = "google" td.TerraformProviderModule = "github.com/hashicorp/terraform-provider-google" - } else if version.Name == ALPHA_VERSION { + } else if versionName == ALPHA_VERSION { td.TerraformResourceDirectory = "google-private" td.TerraformProviderModule = "internal/terraform-next" } else { @@ -293,9 +292,9 @@ func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, g // end func (td *TemplateData) ImportPath() string { - if td.Version.Name == GA_VERSION { + if td.VersionName == GA_VERSION { return "github.com/hashicorp/terraform-provider-google/google" - } else if td.Version.Name == ALPHA_VERSION { + } else if td.VersionName == ALPHA_VERSION { return "internal/terraform-next/google-private" } return "github.com/hashicorp/terraform-provider-google-beta/google-beta" diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 5ffc49dee51b..834df4869501 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -97,7 +97,7 @@ func (t *Terraform) GenerateObjects(outputFolder string, generateCode, generateD } func (t *Terraform) GenerateObject(object api.Resource, outputFolder, productPath string, generateCode, generateDocs bool) { - templateData := NewTemplateData(outputFolder, t.Version) + templateData := NewTemplateData(outputFolder, t.TargetVersionName) if !object.IsExcluded() { log.Printf("Generating %s resource", object.Name) @@ -143,7 +143,7 @@ func (t *Terraform) GenerateResourceTests(object api.Resource, templateData Temp eligibleExample := false for _, example := range object.Examples { if !example.SkipTest { - if 
object.ProductMetadata.VersionObjOrClosest(t.Version.Name).CompareTo(object.ProductMetadata.VersionObjOrClosest(example.MinVersion)) > 0 { + if object.ProductMetadata.VersionObjOrClosest(t.Version.Name).CompareTo(object.ProductMetadata.VersionObjOrClosest(example.MinVersion)) >= 0 { eligibleExample = true break } @@ -190,7 +190,7 @@ func (t *Terraform) GenerateOperation(outputFolder string) { log.Println(fmt.Errorf("error creating parent directory %v: %v", targetFolder, err)) } targetFilePath := path.Join(targetFolder, fmt.Sprintf("%s_operation.go", google.Underscore(t.Product.Name))) - templateData := NewTemplateData(outputFolder, t.Version) + templateData := NewTemplateData(outputFolder, t.TargetVersionName) templateData.GenerateOperationFile(targetFilePath, *asyncObjects[0]) } @@ -392,7 +392,7 @@ func (t Terraform) CopyFileList(outputFolder string, files map[string]string) { func (t Terraform) CompileCommonFiles(outputFolder string, products []*api.Product, overridePath string) { t.generateResourcesForVersion(products) files := t.getCommonCompileFiles(t.TargetVersionName) - templateData := NewTemplateData(outputFolder, t.Version) + templateData := NewTemplateData(outputFolder, t.TargetVersionName) t.CompileFileList(outputFolder, files, *templateData, products) } diff --git a/mmv1/provider/terraform.rb b/mmv1/provider/terraform.rb index 5ba3f3b46763..bd6abab44f8a 100644 --- a/mmv1/provider/terraform.rb +++ b/mmv1/provider/terraform.rb @@ -410,6 +410,9 @@ def generate_object(object, output_folder, version_name, generate_code, generate end def generate_object_modified(object, output_folder, version_name) + # skip healthcare - exceptional case will be done manually + return if output_folder.include? 
'healthcare' + pwd = Dir.pwd data = build_object_data(pwd, object, output_folder, version_name) Dir.chdir output_folder diff --git a/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl b/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl index af237ca83f40..e3b8f39cbee3 100644 --- a/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl +++ b/mmv1/templates/terraform/constants/go/bigquery_dataset_access.go.tmpl @@ -27,22 +27,30 @@ func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *s } if memberInState := d.Get("user_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("group_by_email").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("domain").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } if memberInState := d.Get("special_group").(string); memberInState != "" { - return strings.ToUpper(memberInState) == strings.ToUpper(strippedIamMember) + return strings.ToLower(memberInState) == strings.ToLower(strippedIamMember) } } + if memberInState := d.Get("user_by_email").(string); memberInState != "" { + return strings.ToLower(old) == strings.ToLower(new) + } + + if memberInState := d.Get("group_by_email").(string); memberInState != "" { + return strings.ToLower(old) == strings.ToLower(new) + } + return false } diff --git a/mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl b/mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl new file mode 100644 index 000000000000..4acee74aa971 
--- /dev/null +++ b/mmv1/templates/terraform/constants/go/compute_resource_policy.go.tmpl @@ -0,0 +1,5 @@ +// Suppresses a diff on cases like 1:00 when it should be 01:00. +// Because API will normalize this value +func HourlyFormatSuppressDiff(_, old, new string, _ *schema.ResourceData) bool { + return old == "0"+new +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl b/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl index fe8d016797b3..17c73cc0954e 100644 --- a/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl +++ b/mmv1/templates/terraform/constants/go/notebooks_instance.go.tmpl @@ -87,7 +87,8 @@ func modifyNotebooksInstanceState(config *transport_tpg.Config, d *schema.Resour } return res, nil } -{{ if ne $.Compiler "terraformgoogleconversion-codegen" }} + +{{- if ne $.Compiler "terraformgoogleconversion-codegen" }} func waitForNotebooksOperation(config *transport_tpg.Config, d *schema.ResourceData, project string, billingProject string, userAgent string, response map[string]interface{}) error { var opRes map[string]interface{} err := NotebooksOperationWaitTimeWithResponse( diff --git a/mmv1/templates/terraform/constants/go/subnetwork.tmpl b/mmv1/templates/terraform/constants/go/subnetwork.tmpl index 3833540942b6..74edec68a5a5 100644 --- a/mmv1/templates/terraform/constants/go/subnetwork.tmpl +++ b/mmv1/templates/terraform/constants/go/subnetwork.tmpl @@ -17,3 +17,34 @@ func IsShrinkageIpCidr(_ context.Context, old, new, _ interface{}) bool { return true } + +func sendSecondaryIpRangeIfEmptyDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + // on create, return immediately as we don't need to determine if the value is empty or not + if diff.Id() == "" { + return nil + } + + sendZero := diff.Get("send_secondary_ip_range_if_empty").(bool) + if !sendZero { + return nil + } + + configSecondaryIpRange := 
diff.GetRawConfig().GetAttr("secondary_ip_range") + if !configSecondaryIpRange.IsKnown() { + return nil + } + configValueIsEmpty := configSecondaryIpRange.IsNull() || configSecondaryIpRange.LengthInt() == 0 + + stateSecondaryIpRange := diff.GetRawState().GetAttr("secondary_ip_range") + if !stateSecondaryIpRange.IsKnown() { + return nil + } + stateValueIsEmpty := stateSecondaryIpRange.IsNull() || stateSecondaryIpRange.LengthInt() == 0 + + if configValueIsEmpty && !stateValueIsEmpty { + log.Printf("[DEBUG] setting secondary_ip_range to newly empty") + diff.SetNew("secondary_ip_range", make([]interface{}, 0)) + } + + return nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl b/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl index b2941d68a57c..5123f7eaf04e 100644 --- a/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl +++ b/mmv1/templates/terraform/constants/go/vmwareengine_private_cloud.go.tmpl @@ -31,3 +31,49 @@ func isMultiNodePrivateCloud(d *schema.ResourceData) bool { } return false } + +func isPrivateCloudInDeletedState(config *transport_tpg.Config, d *schema.ResourceData, billingProject string, userAgent string) (bool, error) { + baseurl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}") + if err != nil { + return false, err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: baseurl, + UserAgent: userAgent, + }) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[DEBUG] No existing private cloud found") + return false, nil + } + return false, err + } + // if resource exists but is marked for deletion + v, ok := res["state"] + if ok && v.(string) == "DELETED" { + log.Printf("[DEBUG] The Private cloud exists and is marked for deletion.") + 
return true, nil + } + return false, nil +} + +// Check if private cloud is absent or if it exists in a deleted state. +func pollCheckForPrivateCloudAbsence(resp map[string]interface{}, respErr error) transport_tpg.PollResult { + if respErr != nil { + if transport_tpg.IsGoogleApiErrorWithCode(respErr, 404) { + return transport_tpg.SuccessPollResult() + } + return transport_tpg.ErrorPollResult(respErr) + } + // if resource exists but is marked for deletion + log.Printf("[DEBUG] Fetching state of the private cloud.") + v, ok := resp["state"] + if ok && v.(string) == "DELETED" { + log.Printf("[DEBUG] The Private cloud has been successfully marked for delayed deletion.") + return transport_tpg.SuccessPollResult() + } + return transport_tpg.PendingStatusPollResult("found") +} diff --git a/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl b/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl index d133473847c7..5dc7e90f5558 100644 --- a/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl +++ b/mmv1/templates/terraform/constants/go/workbench_instance.go.tmpl @@ -24,59 +24,62 @@ func WorkbenchInstanceLabelsDiffSuppress(k, old, new string, d *schema.ResourceD var WorkbenchInstanceProvidedMetadata = []string{ - "agent-health-check-interval-seconds", - "agent-health-check-path", - "container", - "custom-container-image", - "custom-container-payload", - "data-disk-uri", - "dataproc-allow-custom-clusters", - "dataproc-cluster-name", - "dataproc-configs", - "dataproc-default-subnet", - "dataproc-locations-list", - "dataproc-machine-types-list", - "dataproc-notebooks-url", - "dataproc-region", - "dataproc-service-account", - "disable-check-xsrf", - "framework", - "gcs-data-bucket", - "generate-diagnostics-bucket", - "generate-diagnostics-file", - "generate-diagnostics-options", - "image-url", - "install-monitoring-agent", - "install-nvidia-driver", - "installed-extensions", - "last_updated_diagnostics", - "notebooks-api", - 
"notebooks-api-version", - "notebooks-examples-location", - "notebooks-location", - "proxy-backend-id", - "proxy-byoid-url", - "proxy-mode", - "proxy-status", - "proxy-url", - "proxy-user-mail", - "report-container-health", - "report-event-url", - "report-notebook-metrics", - "report-system-health", - "report-system-status", - "restriction", - "serial-port-logging-enable", - "shutdown-script", - "title", - "use-collaborative", - "user-data", - "version", - - "disable-swap-binaries", - "enable-guest-attributes", - "enable-oslogin", - "proxy-registration-url", + "agent-health-check-interval-seconds", + "agent-health-check-path", + "container", + "cos-update-strategy", + "custom-container-image", + "custom-container-payload", + "data-disk-uri", + "dataproc-allow-custom-clusters", + "dataproc-cluster-name", + "dataproc-configs", + "dataproc-default-subnet", + "dataproc-locations-list", + "dataproc-machine-types-list", + "dataproc-notebooks-url", + "dataproc-region", + "dataproc-service-account", + "disable-check-xsrf", + "framework", + "gcs-data-bucket", + "generate-diagnostics-bucket", + "generate-diagnostics-file", + "generate-diagnostics-options", + "google-logging-enabled", + "image-url", + "install-monitoring-agent", + "install-nvidia-driver", + "installed-extensions", + "last_updated_diagnostics", + "notebooks-api", + "notebooks-api-version", + "notebooks-examples-location", + "notebooks-location", + "proxy-backend-id", + "proxy-byoid-url", + "proxy-mode", + "proxy-status", + "proxy-url", + "proxy-user-mail", + "report-container-health", + "report-event-url", + "report-notebook-metrics", + "report-system-health", + "report-system-status", + "restriction", + "serial-port-logging-enable", + "service-account-mode", + "shutdown-script", + "title", + "use-collaborative", + "user-data", + "version", + + "disable-swap-binaries", + "enable-guest-attributes", + "enable-oslogin", + "proxy-registration-url", } func WorkbenchInstanceMetadataDiffSuppress(k, old, new string, d 
*schema.ResourceData) bool { diff --git a/mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl b/mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl new file mode 100644 index 000000000000..1518e1014a61 --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/go/cloudquotas_quota_preference_trace_id.go.tmpl @@ -0,0 +1,3 @@ +func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return nil, nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl index c5754ad27380..7f57f25f3d02 100644 --- a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl @@ -15,7 +15,12 @@ func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.T if v, ok := d.GetOk("name"); ok { certName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - certName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + certName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + certName = id.PrefixedUniqueId(prefix) + } } else { certName = id.UniqueId() } diff --git a/mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl b/mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl new file mode 100644 index 000000000000..5b419947c9ca --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/go/string_to_lower_case.go.tmpl @@ -0,0 +1,19 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -}} +func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return strings.ToLower(v.(string)), nil +} diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl new file mode 100644 index 000000000000..ffdee7b68523 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_generation.go.tmpl @@ -0,0 +1,38 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -}} +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // This flatten function is shared between the resource and the datasource. 
+ // TF Input will use the generation from the source object + // GET Response will use the generation from the automatically created object + // As TF Input and GET response values have different format, + // we will return TF Input value to prevent state drift. + + if genVal, ok := d.GetOk("build_config.0.source.0.storage_source.0.generation"); ok { + v = genVal + } + + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} diff --git a/mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl new file mode 100644 index 000000000000..7eb7c54775a2 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/discoveryengine_schema_json_schema.go.tmpl @@ -0,0 +1,23 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -}} + +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + s, err := structure.NormalizeJsonString(v) + if err != nil { + log.Printf("[ERROR] failed to normalize JSON string: %v", err) + } + return s +} \ No newline at end of file diff --git a/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl b/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl index 78cb56c58cd0..54e6d0f587d0 100644 --- a/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/service_directory_service.go.tmpl @@ -53,3 +53,4 @@ if len(nameParts) == 8 { "{{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}}/{{"{{"}}service_id{{"}}"}}") } return []*schema.ResourceData{d}, nil + diff --git a/mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..529fe042e33f --- /dev/null +++ b/mmv1/templates/terraform/custom_import/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,17 @@ +config := meta.(*transport_tpg.Config) + +// current import_formats can't import fields with forward slashes in their value +if err := tpgresource.ParseImportId([]string{ + "(?P.+)/deployedIndex/(?P[^/]+)", +}, d, config); err != nil { + return nil, err +} + +// Replace import id for the resource id +id, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}index_endpoint{{"}}"}}/deployedIndex/{{"{{"}}deployed_index_id{{"}}"}}") +if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) +} +d.SetId(id) + +return []*schema.ResourceData{d}, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl b/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl 
index 06c94a1d0553..3b9f86dd0fb4 100644 --- a/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl @@ -10,18 +10,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -// We need to pretend IAP isn't there if it's disabled for Terraform to maintain -// BC behaviour with the handwritten resource. -v, ok := res["iap"] -if !ok || v == nil { - delete(res, "iap") - return res, nil -} -m := v.(map[string]interface{}) -if ok && m["enabled"] == false { - delete(res, "iap") -} - // Requests with consistentHash will error for specific values of // localityLbPolicy. However, the API will not remove it if the backend // service is updated to from supporting to non-supporting localityLbPolicy diff --git a/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl b/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl index a9d8d364bcc3..f674806c302a 100644 --- a/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl @@ -21,6 +21,19 @@ if paramMap, ok := res["params"]; ok { } } } + for k, v := range params { + switch v.(type) { + case []interface{}, map[string]interface{}: + value, err := json.Marshal(v) + if err != nil { + return nil, err + } + params[k] = string(value) + default: + params[k] = v + } + } + res["params"] = params } return res, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl index 560cd1243da7..e105509f502c 100644 --- a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl @@ -10,22 +10,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -}} -// We need to pretend IAP isn't there if it's disabled for Terraform to maintain -// BC behaviour with the handwritten resource. -v, ok := res["iap"] -if !ok || v == nil { - delete(res, "iap") - return res, nil -} -m := v.(map[string]interface{}) -if ok && m["enabled"] == false { - delete(res, "iap") -} {{ if ne $.TargetVersionName `ga` -}} // Since we add in a NONE subsetting policy, we need to remove it in some // cases for backwards compatibility with the config -v, ok = res["subsetting"] +v, ok := res["subsetting"] if ok && v != nil { subsetting := v.(map[string]interface{}) policy, ok := subsetting["policy"] diff --git a/mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..4775b5119c94 --- /dev/null +++ b/mmv1/templates/terraform/decoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,20 @@ +v, ok := res["deployedIndexes"] +if !ok || v == nil { // CREATE + res["name"] = res["deployedIndexId"] + delete(res, "deployedIndexId") + return res, nil +} +dpIndex := make(map[string]interface{}) +for _, v := range v.([]interface{}) { + dpI := v.(map[string]interface{}) + if dpI["id"] == d.Get("deployed_index_id").(string) { + dpI["indexEndpoint"] = d.Get("index_endpoint") + dpI["deployedIndexId"] = d.Get("deployed_index_id") + dpIndex = dpI + break + } +} +if dpIndex == nil { + return nil, fmt.Errorf("Error: Deployment Index not Found") +} +return dpIndex, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl b/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl index 333664a0ba96..f92c155ea246 100644 --- a/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl @@ -10,24 +10,6 @@ See the License for the specific language governing permissions and limitations 
under the License. */ -}} -// The BackendService API's Update / PUT API is badly formed and behaves like -// a PATCH field for at least IAP. When sent a `null` `iap` field, the API -// doesn't disable an existing field. To work around this, we need to emulate -// the old Terraform behaviour of always sending the block (at both update and -// create), and force sending each subfield as empty when the block isn't -// present in config. - -iapVal := obj["iap"] -if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - obj["iap"] = data -} else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap -} - backendsRaw, ok := obj["backends"] if !ok { return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl b/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl index 4ec9950635f4..631b88f55ff6 100644 --- a/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl @@ -15,8 +15,24 @@ if !ok { paramMap = make(map[string]string) } -var params map[string]string -params = paramMap.(map[string]string) +params := map[string]interface{}{} + +for k, v := range paramMap.(map[string]string) { + var value interface{} + if err := json.Unmarshal([]byte(v), &value); err != nil { + // If the value is a string, don't convert it to anything. + params[k] = v + } else { + switch value.(type) { + case float64: + // If the value is a number, keep the string representation. + params[k] = v + default: + // If the value is another JSON type, keep the unmarshalled type as is. + params[k] = value + } + } +} for _, sp := range sensitiveParams { if auth, _ := d.GetOkExists("sensitive_params.0." 
+ sp); auth != "" { diff --git a/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl index 168d4a65c5ef..bcf2aa79977f 100644 --- a/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl @@ -7,4 +7,12 @@ if _, ok := obj["certificateManagerCertificates"]; ok { obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } -return obj, nil \ No newline at end of file + +// Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` +// in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove +// the association. +if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil +} + +return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl index 168d4a65c5ef..bcf2aa79977f 100644 --- a/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl @@ -7,4 +7,12 @@ if _, ok := obj["certificateManagerCertificates"]; ok { obj["sslCertificates"] = obj["certificateManagerCertificates"] delete(obj, "certificateManagerCertificates") } -return obj, nil \ No newline at end of file + +// Send null if serverTlsPolicy is not set. Without this, Terraform would not send any value for `serverTlsPolicy` +// in the "PATCH" payload so if you were to remove a server TLS policy from a target HTTPS proxy, it would NOT remove +// the association. 
+if _, ok := obj["serverTlsPolicy"]; !ok { + obj["serverTlsPolicy"] = nil +} + +return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl index 40010b8ac301..12e1d789053f 100644 --- a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl @@ -10,23 +10,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -// The RegionBackendService API's Update / PUT API is badly formed and behaves like -// a PATCH field for at least IAP. When sent a `null` `iap` field, the API -// doesn't disable an existing field. To work around this, we need to emulate -// the old Terraform behaviour of always sending the block (at both update and -// create), and force sending each subfield as empty when the block isn't -// present in config. - -iapVal := obj["iap"] -if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - obj["iap"] = data -} else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap -} if d.Get("load_balancing_scheme").(string) == "EXTERNAL_MANAGED" || d.Get("load_balancing_scheme").(string) == "INTERNAL_MANAGED" { return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..487a61305df4 --- /dev/null +++ b/mmv1/templates/terraform/encoders/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,7 @@ +req := make(map[string]interface{}) +obj["id"] = d.Get("deployed_index_id") +delete(obj, "deployedIndexId") +delete(obj, "name") +delete(obj, "indexEndpoint") +req["deployedIndex"] = obj +return req, nil \ No newline at end of file diff --git 
a/mmv1/templates/terraform/encoders/go/workflow.go.tmpl b/mmv1/templates/terraform/encoders/go/workflow.go.tmpl index 4888f187bd15..511fde357259 100644 --- a/mmv1/templates/terraform/encoders/go/workflow.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/workflow.go.tmpl @@ -2,7 +2,12 @@ var ResName string if v, ok := d.GetOk("name"); ok { ResName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - ResName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + ResName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + ResName = id.PrefixedUniqueId(prefix) + } } else { ResName = id.UniqueId() } diff --git a/mmv1/templates/terraform/examples/apphub_application_full.tf.erb b/mmv1/templates/terraform/examples/apphub_application_full.tf.erb index 42d9de520464..be22ec1d0b4d 100644 --- a/mmv1/templates/terraform/examples/apphub_application_full.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_application_full.tf.erb @@ -5,7 +5,7 @@ resource "google_apphub_application" "<%= ctx[:primary_resource_id] %>" { scope { type = "REGIONAL" } - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/apphub_service_full.tf.erb b/mmv1/templates/terraform/examples/apphub_service_full.tf.erb index e76a286dc23c..0a8403c3fbf7 100644 --- a/mmv1/templates/terraform/examples/apphub_service_full.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_service_full.tf.erb @@ -50,7 +50,7 @@ resource "google_apphub_service" "<%= ctx[:primary_resource_id] %>" { service_id = google_compute_forwarding_rule.forwarding_rule.name discovered_service = data.google_apphub_discovered_service.catalog-service.name display_name = "<%= ctx[:vars]['display_name'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" attributes { environment { type = "STAGING" diff --git 
a/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb b/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb index bd92c81d0e7e..ffc71316e1b5 100644 --- a/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb @@ -49,7 +49,7 @@ resource "google_apphub_workload" "<%= ctx[:primary_resource_id] %>" { workload_id = google_compute_region_instance_group_manager.mig.name discovered_workload = data.google_apphub_discovered_workload.catalog-workload.name display_name = "<%= ctx[:vars]['display_name'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb index 0b6cee4f9d65..5d0145d426f7 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" } diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb index cdbcd495d5c9..21b61833f8de 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_cleanup.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= 
ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" cleanup_policy_dry_run = false cleanup_policies { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb index 84e23ef06179..e718d64f9b29 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_docker.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" docker_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb index 67a22125e500..3ec0b7b3aa5b 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb index fce0829c3a2e..e1810d2357b8 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_apt.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" 
"<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "APT" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb index ef515c550631..94942ac8d073 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_docker_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb index 00f616fc8306..fe47b73a605d 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_dockerhub_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "REMOTE_REPOSITORY" 
remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb index 9b07a1e78cdc..e6f31910b16b 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_maven_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "MAVEN" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb index 81d3e70b6021..cde2d86b8336 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_npm_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "NPM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb 
index d9cbc61d6d05..dd633f41199c 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_python_custom_with_auth.tf.erb @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "PYTHON" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb index 1e631e677059..0896665e8af0 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "YUM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb index 4d20c408b80b..35440ca93937 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_virtual.tf.erb @@ -1,14 +1,14 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>-upstream-1" { location = "us-central1" repository_id = "<%= ctx[:vars]['upstream_repository_id'] %>-1" - description = 
"<%= ctx[:vars]['upstream_description'] %> 1" + description = "<%= ctx[:vars]['upstream_desc'] %> 1" format = "DOCKER" } resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %>-upstream-2" { location = "us-central1" repository_id = "<%= ctx[:vars]['upstream_repository_id'] %>-2" - description = "<%= ctx[:vars]['upstream_description'] %> 2" + description = "<%= ctx[:vars]['upstream_desc'] %> 2" format = "DOCKER" } @@ -16,7 +16,7 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %> depends_on = [] location = "us-central1" repository_id = "<%= ctx[:vars]['repository_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" format = "DOCKER" mode = "VIRTUAL_REPOSITORY" virtual_repository_config { diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb index 987d8f978686..691eddb35fe1 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_basic.tf.erb @@ -2,5 +2,5 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" } diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb index 5f76a93d4d64..456ba8a564ed 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_data_exchange_dcr.tf.erb @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= 
ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" sharing_environment_config { dcr_exchange_config {} } diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb index 7099cbec2506..3c6fefab4b50 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_basic.tf.erb @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" } resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] %>" { @@ -10,7 +10,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] data_exchange_id = google_bigquery_analytics_hub_data_exchange.<%= ctx[:primary_resource_id] %>.data_exchange_id listing_id = "<%= ctx[:vars]['listing_id'] %>" display_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" bigquery_dataset { dataset = google_bigquery_dataset.<%= ctx[:primary_resource_id] %>.id @@ -20,6 +20,6 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { dataset_id = "<%= ctx[:vars]['listing_id'] %>" friendly_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" location = "US" } \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb index 399a0d91de47..afa60930cc38 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_dcr.tf.erb @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" sharing_environment_config { dcr_exchange_config {} } @@ -13,7 +13,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] data_exchange_id = google_bigquery_analytics_hub_data_exchange.<%= ctx[:primary_resource_id] %>.data_exchange_id listing_id = "<%= ctx[:vars]['listing_id'] %>" display_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" bigquery_dataset { dataset = google_bigquery_dataset.<%= ctx[:primary_resource_id] %>.id @@ -30,7 +30,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { dataset_id = "<%= ctx[:vars]['listing_id'] %>" friendly_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" location = "US" } diff --git a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb index 7c3ba2242db1..6f73c97903e3 100644 --- a/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb +++ b/mmv1/templates/terraform/examples/bigquery_analyticshub_listing_restricted.tf.erb 
@@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "<%= ctx[:primary_resourc location = "US" data_exchange_id = "<%= ctx[:vars]['data_exchange_id'] %>" display_name = "<%= ctx[:vars]['data_exchange_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" } resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] %>" { @@ -10,7 +10,7 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] data_exchange_id = google_bigquery_analytics_hub_data_exchange.<%= ctx[:primary_resource_id] %>.data_exchange_id listing_id = "<%= ctx[:vars]['listing_id'] %>" display_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" bigquery_dataset { dataset = google_bigquery_dataset.<%= ctx[:primary_resource_id] %>.id @@ -25,6 +25,6 @@ resource "google_bigquery_analytics_hub_listing" "<%= ctx[:primary_resource_id] resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { dataset_id = "<%= ctx[:vars]['listing_id'] %>" friendly_name = "<%= ctx[:vars]['listing_id'] %>" - description = "<%= ctx[:vars]['description'] %>" + description = "<%= ctx[:vars]['desc'] %>" location = "US" } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl new file mode 100644 index 000000000000..17561629da0e --- /dev/null +++ b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_egress_policy.tf.tmpl @@ -0,0 +1,36 @@ +resource "google_access_context_manager_service_perimeter" "storage-perimeter" { + parent = "accesspolicies/${google_access_context_manager_access_policy.access-policy.name}" + name = 
"accesspolicies/${google_access_context_manager_access_policy.access-policy.name}/serviceperimeters/storage-perimeter" + title = "Storage Perimeter" + spec { + restricted_services = ["storage.googleapis.com"] + } + lifecycle { + ignore_changes = [status[0].resources] + } +} + +resource "google_access_context_manager_service_perimeter_dry_run_egress_policy" "egress_policy" { + perimeter = "${google_access_context_manager_service_perimeter.storage-perimeter.name}" + egress_from { + identity_type = "ANY_IDENTITY" + } + egress_to { + resources = ["*"] + operations { + service_name = "bigquery.googleapis.com" + method_selectors { + method = "*" + } + } + } + lifecycle { + create_before_destroy = true + } +} + + +resource "google_access_context_manager_access_policy" "access-policy" { + parent = "organizations/123456789" + title = "Storage Policy" +} diff --git a/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl new file mode 100644 index 000000000000..df981c843515 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/access_context_manager_service_perimeter_dry_run_ingress_policy.tf.tmpl @@ -0,0 +1,39 @@ +resource "google_access_context_manager_service_perimeter" "storage-perimeter" { + parent = "accesspolicies/${google_access_context_manager_access_policy.access-policy.name}" + name = "accesspolicies/${google_access_context_manager_access_policy.access-policy.name}/serviceperimeters/storage-perimeter" + title = "Storage Perimeter" + status { + restricted_services = ["storage.googleapis.com"] + } + lifecycle { + ignore_changes = [status[0].resources] + } +} + +resource "google_access_context_manager_service_perimeter_dry_run_ingress_policy" "ingress_policy" { + perimeter = "${google_access_context_manager_service_perimeter.storage-perimeter.name}" + ingress_from { + identity_type = "any_identity" + 
sources { + access_level = "*" + } + } + ingress_to { + resources = ["*"] + operations { + service_name = "bigquery.googleapis.com" + method_selectors { + method = "*" + } + } + } + lifecycle { + create_before_destroy = true + } +} + + +resource "google_access_context_manager_access_policy" "access-policy" { + parent = "organizations/123456789" + title = "Storage Policy" +} diff --git a/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl index 25d7b18d8c8a..665ad26f8754 100644 --- a/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/active_directory_domain_basic.tf.tmpl @@ -1,6 +1,6 @@ resource "google_active_directory_domain" "ad-domain" { domain_name = "{{index $.Vars "domain_name"}}.org.com" locations = ["us-central1"] - reserved_ip_range = "192.168.255.0/24" + reserved_ip_range = "192.168.255.0/24" deletion_protection = false } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl index c16b4eab99d5..c1f9cf61d676 100644 --- a/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/active_directory_domain_trust_basic.tf.tmpl @@ -5,4 +5,5 @@ resource "google_active_directory_domain_trust" "ad-domain-trust" { trust_direction = "OUTBOUND" trust_type = "FOREST" trust_handshake_secret = "Testing1!" 
+ deletion_protection = false } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl index 51c61dea4d78..e18ce41918ff 100644 --- a/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/active_directory_peering_basic.tf.tmpl @@ -3,6 +3,7 @@ resource "google_active_directory_peering" "ad-domain-peering" { domain_resource = google_active_directory_domain.ad-domain.name peering_id = "ad-domain-peering" authorized_network = google_compute_network.peered-network.id + deletion_protection = false labels = { foo = "bar" } @@ -14,6 +15,7 @@ resource "google_active_directory_domain" "ad-domain" { locations = ["us-central1"] reserved_ip_range = "192.168.255.0/24" authorized_networks = [google_compute_network.source-network.id] + deletion_protection = false } resource "google_compute_network" "peered-network" { diff --git a/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl index ad96d25ec95e..7713276440e6 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl @@ -45,7 +45,6 @@ resource "google_alloydb_cluster" "restored_via_pitr" { network_config { network = data.google_compute_network.default.id } - restore_continuous_backup_source { cluster = google_alloydb_cluster.{{$.PrimaryResourceId}}.name point_in_time = "2023-08-03T19:19:00.094Z" diff --git a/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl index 7c2b09d69979..0ca5146f0ac8 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/alloydb_instance_basic_test.tf.tmpl @@ -14,7 +14,6 @@ resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { network_config { network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_name"}}" } diff --git a/mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl new file mode 100644 index 000000000000..d2d4712d0ae7 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/alloydb_instance_psc_test.tf.tmpl @@ -0,0 +1,21 @@ +resource "google_alloydb_instance" "{{$.PrimaryResourceId}}" { + cluster = google_alloydb_cluster.{{$.PrimaryResourceId}}.name + instance_id = "{{index $.Vars "alloydb_instance_name"}}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { + cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" + location = "us-central1" + + initial_user { + password = "{{index $.Vars "alloydb_cluster_name"}}" + } + psc_config { + psc_enabled = true + } +} diff --git a/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl index 8b202bad443e..8e2eeb44e88d 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_secondary_instance_basic.tf.tmpl @@ -22,7 +22,7 @@ resource "google_alloydb_cluster" "secondary" { cluster_id = "{{index $.Vars "alloydb_secondary_cluster_name"}}" location = "us-east1" network_config { - network = google_compute_network.default.id + network = data.google_compute_network.default.id } cluster_type = "SECONDARY" diff --git a/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl index 6ae9ee261194..b91e92abddce 
100644 --- a/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_user_builtin.tf.tmpl @@ -10,9 +10,8 @@ resource "google_alloydb_cluster" "default" { cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" location = "us-central1" network_config { - network = google_compute_network.default.id + network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_pass"}}" } diff --git a/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl index a46757d8a79e..01d0f92dd77c 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_user_builtin_test.tf.tmpl @@ -10,7 +10,6 @@ resource "google_alloydb_cluster" "default" { network_config { network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_pass"}}" } diff --git a/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl index e3b2b4743180..eebb03454761 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_user_iam_test.tf.tmpl @@ -10,7 +10,6 @@ resource "google_alloydb_cluster" "default" { network_config { network = data.google_compute_network.default.id } - initial_user { password = "{{index $.Vars "alloydb_cluster_pass"}}" } diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl index 2d122300c59d..aa204c22c688 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_beta_test.tf.tmpl @@ -5,6 
+5,7 @@ resource "google_project" "project" { name = "tf-test%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl index 61d723d0897f..c7b3f6bf134c 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_beta_test.tf.tmpl @@ -5,6 +5,7 @@ resource "google_project" "project" { name = "tf-test%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl index b1cc55fb2abc..f70f639f2a54 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_entries_test.tf.tmpl @@ -3,6 +3,7 @@ resource "google_project" "project" { name = "tf-test-%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl index d08b3601898c..4265a85236f8 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/apigee_environment_keyvaluemaps_test.tf.tmpl @@ -3,6 +3,7 @@ resource "google_project" "project" { name = "tf-test-%{random_suffix}" org_id = "{{index $.TestEnvVars "org_id"}}" billing_account = "{{index $.TestEnvVars "billing_account"}}" + deletion_policy = "DELETE" } resource "google_project_service" "apigee" { diff --git a/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl index efb5464434f5..daf707b451c5 100644 --- a/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apphub_application_full.tf.tmpl @@ -5,7 +5,7 @@ resource "google_apphub_application" "{{$.PrimaryResourceId}}" { scope { type = "REGIONAL" } - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl index e9a8ee7e82ef..31a52f86769e 100644 --- a/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apphub_service_full.tf.tmpl @@ -50,7 +50,7 @@ resource "google_apphub_service" "{{$.PrimaryResourceId}}" { service_id = google_compute_forwarding_rule.forwarding_rule.name discovered_service = data.google_apphub_discovered_service.catalog-service.name display_name = "{{index $.Vars "display_name"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl index ca13f5699c30..2a51cfb09d59 100644 --- a/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl @@ -49,7 +49,7 @@ resource "google_apphub_workload" "{{$.PrimaryResourceId}}" { workload_id = google_compute_region_instance_group_manager.mig.name discovered_workload = data.google_apphub_discovered_workload.catalog-workload.name display_name = "{{index $.Vars "display_name"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" attributes { environment { type = "STAGING" diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl index 7a63233ab7a2..37687acc6960 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_basic.tf.tmpl @@ -1,6 +1,6 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" } diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl index 96a5b8a3bdb7..8853964941e1 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_cleanup.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" cleanup_policy_dry_run = false cleanup_policies { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl 
b/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl index 6c8e6181bdb4..b7c1bd729542 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_docker.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" docker_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl index 255cacab7263..a91876f75950 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl index dae742f1bad9..ddffa4d557c1 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_apt.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "APT" mode = 
"REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl index 562bff87fff5..17a3951db7f6 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_docker_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl index 92b0d4684730..e005c0e8ed59 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_dockerhub_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl 
b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl index 1f3d5696a7d2..e259ae40a08e 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_maven_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "MAVEN" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl index 5bd553e75a8b..8606714a3644 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_npm_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "NPM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl index 2a0c1ec21685..fa58182a7b44 100644 --- 
a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_python_custom_with_auth.tf.tmpl @@ -21,7 +21,7 @@ resource "google_secret_manager_secret_iam_member" "secret-access" { resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "PYTHON" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl index b8ac1ab35a0a..dd2bba09cf8f 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl @@ -1,7 +1,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "YUM" mode = "REMOTE_REPOSITORY" remote_repository_config { diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl index 36e16607d05c..88ce7362dafb 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_virtual.tf.tmpl @@ -1,14 +1,14 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}-upstream-1" { location = "us-central1" repository_id = "{{index $.Vars "upstream_repository_id"}}-1" - description = "{{index $.Vars "upstream_description"}} 1" 
+ description = "{{index $.Vars "upstream_desc"}} 1" format = "DOCKER" } resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}-upstream-2" { location = "us-central1" repository_id = "{{index $.Vars "upstream_repository_id"}}-2" - description = "{{index $.Vars "upstream_description"}} 2" + description = "{{index $.Vars "upstream_desc"}} 2" format = "DOCKER" } @@ -16,7 +16,7 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { depends_on = [] location = "us-central1" repository_id = "{{index $.Vars "repository_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" format = "DOCKER" mode = "VIRTUAL_REPOSITORY" virtual_repository_config { diff --git a/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl b/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl index 4ce60c795706..f2da2681ca83 100644 --- a/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/backend_service_external_iap.tf.tmpl @@ -3,6 +3,7 @@ resource "google_compute_backend_service" "{{$.PrimaryResourceId}}" { protocol = "HTTP" load_balancing_scheme = "EXTERNAL" iap { + enabled = true oauth2_client_id = "abc" oauth2_client_secret = "xyz" } diff --git a/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl b/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl index 17fe9e39abc9..f40a7b76345c 100644 --- a/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/backend_service_traffic_director_ring_hash.tf.tmpl @@ -19,7 +19,15 @@ resource "google_compute_backend_service" "{{$.PrimaryResourceId}}" { } } outlier_detection { - consecutive_errors = 2 + consecutive_errors = 2 + consecutive_gateway_failure = 5 + enforcing_consecutive_errors = 100 + 
enforcing_consecutive_gateway_failure = 0 + enforcing_success_rate = 100 + max_ejection_percent = 10 + success_rate_minimum_hosts = 5 + success_rate_request_volume = 100 + success_rate_stdev_factor = 1900 } } diff --git a/mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl b/mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl new file mode 100644 index 000000000000..a2bf023066c1 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/backup_dr_backup_vault_full.tf.tmpl @@ -0,0 +1,18 @@ +resource "google_backup_dr_backup_vault" "{{$.PrimaryResourceId}}" { + provider = google-beta + location = "us-central1" + backup_vault_id = "{{index $.Vars "backup_vault_id"}}" + description = "This is a second backup vault built by Terraform." + backup_minimum_enforced_retention_duration = "100000s" + labels = { + foo = "bar1" + bar = "baz1" + } + annotations = { + annotations1 = "bar1" + annotations2 = "baz1" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl index 4feac1f156c8..54cb026c6177 100644 --- a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_basic.tf.tmpl @@ -2,5 +2,5 @@ resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" location = "US" data_exchange_id = "{{index $.Vars "data_exchange_id"}}" display_name = "{{index $.Vars "data_exchange_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" } diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl new file mode 100644 
index 000000000000..bc26d58a9a52 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_data_exchange_dcr.tf.tmpl @@ -0,0 +1,9 @@ +resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" { + location = "US" + data_exchange_id = "{{index $.Vars "data_exchange_id"}}" + display_name = "{{index $.Vars "data_exchange_id"}}" + description = "{{index $.Vars "desc"}}" + sharing_environment_config { + dcr_exchange_config {} + } +} diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl index 6fc1e529a132..b9900df469f8 100644 --- a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_basic.tf.tmpl @@ -2,7 +2,7 @@ resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" location = "US" data_exchange_id = "{{index $.Vars "data_exchange_id"}}" display_name = "{{index $.Vars "data_exchange_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" } resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { @@ -10,7 +10,7 @@ resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { data_exchange_id = google_bigquery_analytics_hub_data_exchange.{{$.PrimaryResourceId}}.data_exchange_id listing_id = "{{index $.Vars "listing_id"}}" display_name = "{{index $.Vars "listing_id"}}" - description = "{{index $.Vars "description"}}" + description = "{{index $.Vars "desc"}}" bigquery_dataset { dataset = google_bigquery_dataset.{{$.PrimaryResourceId}}.id @@ -20,6 +20,6 @@ resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { resource "google_bigquery_dataset" "{{$.PrimaryResourceId}}" { dataset_id = "{{index $.Vars "listing_id"}}" friendly_name = "{{index $.Vars "listing_id"}}" - description = "{{index 
$.Vars "description"}}" + description = "{{index $.Vars "desc"}}" location = "US" } \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl new file mode 100644 index 000000000000..1dbf2705cf73 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_analyticshub_listing_dcr.tf.tmpl @@ -0,0 +1,60 @@ +resource "google_bigquery_analytics_hub_data_exchange" "{{$.PrimaryResourceId}}" { + location = "US" + data_exchange_id = "{{index $.Vars "data_exchange_id"}}" + display_name = "{{index $.Vars "data_exchange_id"}}" + description = "{{index $.Vars "desc"}}" + sharing_environment_config { + dcr_exchange_config {} + } +} + +resource "google_bigquery_analytics_hub_listing" "{{$.PrimaryResourceId}}" { + location = "US" + data_exchange_id = google_bigquery_analytics_hub_data_exchange.{{$.PrimaryResourceId}}.data_exchange_id + listing_id = "{{index $.Vars "listing_id"}}" + display_name = "{{index $.Vars "listing_id"}}" + description = "{{index $.Vars "desc"}}" + + bigquery_dataset { + dataset = google_bigquery_dataset.{{$.PrimaryResourceId}}.id + selected_resources { + table = google_bigquery_table.{{$.PrimaryResourceId}}.id + } + } + + restricted_export_config { + enabled = true + } +} + +resource "google_bigquery_dataset" "{{$.PrimaryResourceId}}" { + dataset_id = "{{index $.Vars "listing_id"}}" + friendly_name = "{{index $.Vars "listing_id"}}" + description = "{{index $.Vars "desc"}}" + location = "US" +} + +resource "google_bigquery_table" "{{$.PrimaryResourceId}}" { + deletion_protection = false + table_id = "{{index $.Vars "listing_id"}}" + dataset_id = google_bigquery_dataset.{{$.PrimaryResourceId}}.dataset_id + schema = <" { redis_configs = { maxmemory-policy = "volatile-ttl" } - deletion_protection_enabled = <%= ctx[:vars]['deletion_protection_enabled'] == 'true' %> + deletion_protection_enabled = <%= 
ctx[:vars]['deletion_protection_enabled']%> zone_distribution_config { mode = "MULTI_ZONE" diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb index 1e3eb14dd2ef..d03efb412529 100644 --- a/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb +++ b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb @@ -20,7 +20,7 @@ resource "google_redis_cluster" "<%= ctx[:primary_resource_id] %>" { } } } - deletion_protection_enabled = <%= ctx[:vars]['deletion_protection_enabled'] == 'true' %> + deletion_protection_enabled = <%= ctx[:vars]['deletion_protection_enabled']%> depends_on = [ google_network_connectivity_service_connection_policy.default ] diff --git a/mmv1/templates/terraform/examples/tpu_node_full.tf.erb b/mmv1/templates/terraform/examples/tpu_node_full.tf.erb index cebe528b24ac..2ae3e8a2a597 100644 --- a/mmv1/templates/terraform/examples/tpu_node_full.tf.erb +++ b/mmv1/templates/terraform/examples/tpu_node_full.tf.erb @@ -23,6 +23,7 @@ resource "google_tpu_node" "<%= ctx[:primary_resource_id] %>" { use the default network in order to still demonstrate using as many fields as possible on the resource. -%> + network = google_service_networking_connection.private_service_connection.network labels = { diff --git a/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb b/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb index 3be36ecb4d64..8ed22bd87a51 100644 --- a/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb +++ b/mmv1/templates/terraform/examples/tpu_node_full_test.tf.erb @@ -10,22 +10,11 @@ resource "google_tpu_node" "<%= ctx[:primary_resource_id] %>" { accelerator_type = "v3-8" -<%#- - We previously used the first available version from the - google_tpu_tensorflow_versions data source. 
However, this started to return a - random set of versions which caused our tests to occasionally fail, so we pin - tensorflow_version to a specific version so that our tests pass reliably. --%> tensorflow_version = "2.10.0" description = "Terraform Google Provider test TPU" use_service_networking = true -<%#- - We previously used a separate network resource here, but TPUs only allow using 50 - different network names, ever. This caused our tests to start failing, so just - use the default network in order to still demonstrate using as many fields as - possible on the resource. --%> + network = data.google_compute_network.network.id labels = { diff --git a/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl b/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl index 589b25919642..e23b16ed87ec 100644 --- a/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl +++ b/mmv1/templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl @@ -7,11 +7,11 @@ Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource - // uuid is 26 characters, limit the prefix to 37. + // uuid is 9 characters, limit the prefix to 54. 
value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, diff --git a/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb b/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb index 7b2623648cb7..8cd2eddbf93a 100644 --- a/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb +++ b/mmv1/templates/terraform/iam/example_config_body/service_management_consumer.tf.erb @@ -1,2 +1,3 @@ + service_name = google_endpoints_service.endpoints_service.service_name consumer_project = "%{consumer_project}" \ No newline at end of file diff --git a/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl b/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl index f86c332a157d..3a72050de8b7 100644 --- a/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl +++ b/mmv1/templates/terraform/post_delete/go/private_cloud.go.tmpl @@ -29,18 +29,11 @@ privateCloudPollRead := func(d *schema.ResourceData, meta interface{}) transport if err != nil { return res, err } - // if resource exists but is marked for deletion - log.Printf("[DEBUG] Fetching state of the private cloud.") - v, ok := res["state"] - if ok && v.(string) == "DELETED" { - log.Printf("[DEBUG] The Private cloud has been successfully marked for delayed deletion.") - return nil, nil - } return res, nil } } -err = transport_tpg.PollingWaitTime(privateCloudPollRead(d, meta), transport_tpg.PollCheckForAbsence, "Deleting {{$.Name}}", d.Timeout(schema.TimeoutDelete), 10) +err = transport_tpg.PollingWaitTime(privateCloudPollRead(d, meta), pollCheckForPrivateCloudAbsence, "Deleting {{$.Name}}", d.Timeout(schema.TimeoutDelete), 10) if err != nil { return fmt.Errorf("Error waiting to delete PrivateCloud: %s", err) -} \ No newline 
at end of file +} diff --git a/mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl b/mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl new file mode 100644 index 000000000000..d7d3e0d8e21c --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/scc_v1_folder_notification_config.go.tmpl @@ -0,0 +1,12 @@ +idParts := strings.Split(d.Id(), "/") +if len(idParts) != 4 { + return nil, fmt.Errorf("unexpected format of ID (%q), expected folders/{{"{{"}}folder{{"}}"}}/notificationConfigs/{{"{{"}}config_id{{"}}"}}", d.Id()) +} + +if err := d.Set("folder", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting folder: %s", err) +} + +if err := d.Set("config_id", idParts[3]); err != nil { + return nil, fmt.Errorf("error setting config_id: %s", err) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl b/mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl new file mode 100644 index 000000000000..10ebbe8b3032 --- /dev/null +++ b/mmv1/templates/terraform/post_import/go/scc_v2_folder_notification_config.go.tmpl @@ -0,0 +1,12 @@ +idParts := strings.Split(d.Id(), "/") +if len(idParts) != 6 { + return nil, fmt.Errorf("unexpected format of ID (%q), expected folders/{{"{{"}}folder{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/notificationConfigs/{{"{{"}}config_id{{"}}"}}", d.Id()) +} + +if err := d.Set("folder", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting folder: %s", err) +} + +if err := d.Set("config_id", idParts[5]); err != nil { + return nil, fmt.Errorf("error setting config_id: %s", err) +} diff --git a/mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl b/mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl new file mode 100644 index 000000000000..6449eb1851f9 --- /dev/null +++ 
b/mmv1/templates/terraform/post_import/go/scc_v2_organization_big_query_export_config.go.tmpl @@ -0,0 +1,12 @@ +idParts := strings.Split(d.Id(), "/") +if len(idParts) != 6 { + return nil, fmt.Errorf("unexpected format of ID (%q), expected organizations/{{"{{"}}organization{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/bigQueryExports/{{"{{"}}big_query_export_id{{"}}"}}", d.Id()) +} + +if err := d.Set("organization", idParts[1]); err != nil { + return nil, fmt.Errorf("error setting organization: %s", err) +} + +if err := d.Set("big_query_export_id", idParts[5]); err != nil { + return nil, fmt.Errorf("error setting big_query_export_id: %s", err) +} diff --git a/mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl b/mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl new file mode 100644 index 000000000000..9fd468471617 --- /dev/null +++ b/mmv1/templates/terraform/post_update/go/compute_subnetwork.go.tmpl @@ -0,0 +1,72 @@ +if v, ok := d.GetOk("send_secondary_ip_range_if_empty"); ok && v.(bool) { + if sv, ok := d.GetOk("secondary_ip_range"); ok { + configValue := d.GetRawConfig().GetAttr("secondary_ip_range") + stateValue := sv.([]interface{}) + if configValue.LengthInt() == 0 && len(stateValue) != 0 { + log.Printf("[DEBUG] Sending empty secondary_ip_range in update") + obj := make(map[string]interface{}) + obj["secondaryIpRanges"] = make([]interface{}, 0) + + // The rest is the same as the secondary_ip_range generated update code + // without the secondaryIpRangesProp logic + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/subnetworks/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: 
config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeSubnetwork %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/subnetworks/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + headers := make(http.Header) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating Subnetwork", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl b/mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl new file mode 100644 index 000000000000..7e9f036c027b --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/access_context_manager_dry_run_resource.go.tmpl @@ -0,0 +1 @@ +obj["use_explicit_dry_run_spec"] = true diff --git a/mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl b/mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl new file mode 100644 index 000000000000..2e0de80e70e8 --- /dev/null +++ 
b/mmv1/templates/terraform/pre_create/go/vmwareengine_private_cloud.go.tmpl @@ -0,0 +1,15 @@ +// Check if the project exists in a deleted state +pcMarkedForDeletion, err := isPrivateCloudInDeletedState(config, d, billingProject, userAgent) +if err != nil { + return fmt.Errorf("Error checking if Private Cloud exists and is marked for deletion: %s", err) +} +if pcMarkedForDeletion { + log.Printf("[DEBUG] Private Cloud exists and is marked for deletion. Triggering UNDELETE of the Private Cloud.\n") + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}VmwareengineBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/privateClouds/{{"{{"}}name{{"}}"}}:undelete") + if err != nil { + return err + } + obj = make(map[string]interface{}) +} else { + log.Printf("[DEBUG] Private Cloud is not found to be marked for deletion. Triggering CREATE of the Private Cloud.\n") +} diff --git a/mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl new file mode 100644 index 000000000000..38c30189ea27 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_job_deletion_policy.go.tmpl @@ -0,0 +1,3 @@ +if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy job without setting deletion_protection=false and running `terraform apply`") +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl new file mode 100644 index 000000000000..444ac1bfe13c --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/cloudrunv2_service_deletion_policy.go.tmpl @@ -0,0 +1,3 @@ +if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy service without setting deletion_protection=false and running `terraform apply`") +} \ No newline at end of file diff --git 
a/mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl b/mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl new file mode 100644 index 000000000000..e182b8aa347c --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/private_connection.go.tmpl @@ -0,0 +1,5 @@ +// Add force=true query param to force deletion of private connection sub resources like Routes +url, err = transport_tpg.AddQueryParams(url, map[string]string{"force": strconv.FormatBool(true)}) +if err != nil { +return err +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..22b6172e267e --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,3 @@ +obj = map[string]interface{}{ + "deployedIndexId": d.Get("deployed_index_id"), +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl b/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl index 603e77f32acc..307208274cb2 100644 --- a/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl +++ b/mmv1/templates/terraform/pre_update/go/bigtable_app_profile.go.tmpl @@ -30,6 +30,16 @@ if newRouting != oldRouting { } } } + +_, hasStandardIsolation := obj["standardIsolation"] +_, hasDataBoostIsolationReadOnly := obj["dataBoostIsolationReadOnly"] +if hasStandardIsolation && hasDataBoostIsolationReadOnly { + // Due to the "conflicts" both fields should be present only if neither was + // previously specified and the user is now manually adding dataBoostIsolationReadOnly. 
+ delete(obj, "standardIsolation") + updateMask = append(updateMask, "dataBoostIsolationReadOnly") +} + // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) diff --git a/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl b/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl index 49e42175c1ac..f0ad51601fd6 100644 --- a/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl +++ b/mmv1/templates/terraform/pre_update/go/spanner_database.go.tmpl @@ -8,7 +8,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. */}} + limitations under the License. +*/ -}} if obj["statements"] != nil { if len(obj["statements"].([]string)) == 0 { diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index d7cb4c524fee..97a860442891 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -21,7 +21,7 @@ (Deprecated) {{- end}} {{- end }} - {{- $.ResourceMetadata.FormatDocDescription $.Description true -}} + {{- $.ResourceMetadata.FormatDocDescription $.GetDescription true -}} {{- if and (and ($.IsA "Array") ($.ItemType.IsA "Enum")) (and (not $.Output) (not $.ItemType.SkipDocsValues))}} {{- if $.ItemType.DefaultValue }} Default value is `{{ $.ItemType.DefaultValue }}`. 
diff --git a/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl b/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl index 291a709b0238..85d3d94dc062 100644 --- a/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl +++ b/mmv1/templates/terraform/update_encoder/go/spanner_database.go.tmpl @@ -8,7 +8,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. */}} + limitations under the License. +*/ -}} if obj["versionRetentionPeriod"] != nil || obj["extraStatements"] != nil { old, new := d.GetChange("ddl") diff --git a/mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl b/mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl new file mode 100644 index 000000000000..ef56b550bd2c --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/vertex_ai_index_endpoint_deployed_index.go.tmpl @@ -0,0 +1,3 @@ +obj["id"] = obj["deployedIndexId"] +delete(obj, "deployedIndexId") +return obj, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 09100c79385b..ee67f6e95e06 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -39,6 +39,9 @@ exclude: <%= object.exclude %> <% unless object.readonly.nil? 
-%> readonly: <%= object.readonly %> <% end -%> +<% unless !object.skip_attribution_label -%> +skip_attribution_label: <%= object.skip_attribution_label %> +<% end -%> <% #references blocks -%> @@ -415,7 +418,7 @@ custom_code: <% end -%> <% custom_diff = object.custom_diff.reject { - |cdiff| cdiff == "tpgresource.SetLabelsDiff" || cdiff == "tpgresource.SetMetadataLabelsDiff" || cdiff == "tpgresource.SetAnnotationsDiff" || cdiff == "tpgresource.SetMetadataAnnotationsDiff" + |cdiff| cdiff == "tpgresource.SetLabelsDiff" || cdiff == "tpgresource.SetLabelsDiffWithoutAttributionLabel" || cdiff == "tpgresource.SetMetadataLabelsDiff" || cdiff == "tpgresource.SetAnnotationsDiff" || cdiff == "tpgresource.SetMetadataAnnotationsDiff" } -%> <% unless custom_diff.empty? -%> diff --git a/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl b/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl index 51e0e4947906..eccc72fdf7e5 100644 --- a/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl +++ b/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl @@ -49,10 +49,7 @@ func CheckDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourc if _, ok := ignoreFields[k]; ok { continue } - if _, ok := ignoreFields["labels.%"]; ok && strings.HasPrefix(k, "labels.") { - continue - } - if _, ok := ignoreFields["terraform_labels.%"]; ok && strings.HasPrefix(k, "terraform_labels.") { + if strings.HasPrefix(k, "labels.") || strings.HasPrefix(k, "terraform_labels.") || strings.HasPrefix(k, "effective_labels.") { continue } if k == "%" { diff --git a/mmv1/third_party/terraform/go/go.mod b/mmv1/third_party/terraform/go/go.mod index 3162439eec0b..3d389c3577ad 100644 --- a/mmv1/third_party/terraform/go/go.mod +++ b/mmv1/third_party/terraform/go/go.mod @@ -117,4 +117,4 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect -) +) \ No newline at end of file 
diff --git a/mmv1/third_party/terraform/go/main.go.tmpl b/mmv1/third_party/terraform/go/main.go.tmpl index d044f11fba2a..8bb1fb9fd4de 100644 --- a/mmv1/third_party/terraform/go/main.go.tmpl +++ b/mmv1/third_party/terraform/go/main.go.tmpl @@ -12,16 +12,6 @@ import ( "github.com/hashicorp/terraform-provider-google/google/fwprovider" "github.com/hashicorp/terraform-provider-google/google/provider" - ver "github.com/hashicorp/terraform-provider-google/version" -) - -var ( - // these will be set by the goreleaser configuration - // to appropriate values for the compiled binary - version string = ver.ProviderVersion - - // goreleaser can also pass the specific commit if you want - // commit string = "" ) func main() { diff --git a/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl index 9a45445de045..33fa97849f1d 100644 --- a/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl +++ b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl @@ -28,7 +28,9 @@ func TestAccAccessContextManager(t *testing.T) { "access_levels": testAccAccessContextManagerAccessLevels_basicTest, "access_level_condition": testAccAccessContextManagerAccessLevelCondition_basicTest, "service_perimeter_egress_policy": testAccAccessContextManagerServicePerimeterEgressPolicy_basicTest, + "service_perimeter_dry_run_egress_policy": testAccAccessContextManagerServicePerimeterDryRunEgressPolicy_basicTest, "service_perimeter_ingress_policy": testAccAccessContextManagerServicePerimeterIngressPolicy_basicTest, + "service_perimeter_dry_run_ingress_policy": testAccAccessContextManagerServicePerimeterDryRunIngressPolicy_basicTest, "service_perimeters": 
testAccAccessContextManagerServicePerimeters_basicTest, "gcp_user_access_binding": testAccAccessContextManagerGcpUserAccessBinding_basicTest, "authorized_orgs_desc": testAccAccessContextManagerAuthorizedOrgsDesc_basicTest, diff --git a/mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl b/mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl new file mode 100644 index 000000000000..141be1e2be7d --- /dev/null +++ b/mmv1/third_party/terraform/services/appengine/go/resource_app_engine_flexible_app_version_test.go.tmpl @@ -0,0 +1,445 @@ +package appengine_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccAppEngineFlexibleAppVersion_update(t *testing.T) { + t.Skip("https://github.com/hashicorp/terraform-provider-google/issues/18239") + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckAppEngineFlexibleAppVersionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAppEngineFlexibleAppVersion_python(context), + }, + { + ResourceName: "google_app_engine_flexible_app_version.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"env_variables", "deployment", "entrypoint", "service", "noop_on_destroy"}, + }, + { + Config: 
testAccAppEngineFlexibleAppVersion_pythonUpdate(context), + }, + { + ResourceName: "google_app_engine_flexible_app_version.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"env_variables", "deployment", "entrypoint", "service", "delete_service_on_destroy"}, + }, + }, + }) +} + +func testAccAppEngineFlexibleAppVersion_python(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "my_project" { + provider = google-beta + name = "tf-test-appeng-flex%{random_suffix}" + project_id = "tf-test-appeng-flex%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + deletion_policy = "DELETE" +} + +resource "google_project_service" "compute" { + provider = google-beta + project = google_project.my_project.project_id + service = "compute.googleapis.com" + + disable_dependent_services = false +} + +resource "google_project_service" "appengineflex" { + provider = google-beta + project = google_project.my_project.project_id + service = "appengineflex.googleapis.com" + + disable_dependent_services = false + depends_on = [google_project_service.compute] +} + +resource "google_compute_network" "network" { + provider = google-beta + project = google_project_service.compute.project + name = "custom" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "subnetwork" { + provider = google-beta + project = google_project_service.compute.project + name = "custom" + region = "us-central1" + network = google_compute_network.network.id + ip_cidr_range = "10.0.0.0/16" + private_ip_google_access = true +} + +resource "google_app_engine_application" "app" { + provider = google-beta + project = google_project.my_project.project_id + location_id = "us-central" +} + +resource "google_project_iam_member" "gae_api" { + provider = google-beta + project = google_project_service.appengineflex.project + role = "roles/compute.networkUser" + member = 
"serviceAccount:service-${google_project.my_project.number}@gae-api-prod.google.com.iam.gserviceaccount.com" +} + +resource "google_app_engine_standard_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "default" + runtime = "python38" + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + } + + env_variables = { + port = "8000" + } + + noop_on_destroy = true +} + +resource "google_app_engine_flexible_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "custom" + runtime = "python" + + runtime_api_version = "1" + + resources { + cpu = 1 + memory_gb = 0.5 + disk_gb = 10 + } + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + flexible_runtime_settings { + operating_system = "ubuntu22" + runtime_version = "3.11" + } + + deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + + files { + name = "app.yaml" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.yaml.name}" + } + } + + liveness_check { + path = "alive" + } + + readiness_check { + path = "ready" + } + + env_variables = { + port = "8000" + } + + network { + name = google_compute_network.network.name + subnetwork = 
google_compute_subnetwork.subnetwork.name + instance_ip_mode = "EXTERNAL" + } + + instance_class = "B1" + + manual_scaling { + instances = 1 + } + + noop_on_destroy = true + + depends_on = [google_app_engine_standard_app_version.foo] +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + project = google_project.my_project.project_id + name = "tf-test-%{random_suffix}-flex-ae-bucket" + location = "US" +} + +resource "google_storage_bucket_object" "yaml" { + provider = google-beta + name = "app.yaml" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/app.yaml" +} + +resource "google_storage_bucket_object" "requirements" { + provider = google-beta + name = "requirements.txt" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/requirements.txt" +} + +resource "google_storage_bucket_object" "main" { + provider = google-beta + name = "main.py" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/main.py" +}`, context) +} + +func testAccAppEngineFlexibleAppVersion_pythonUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "my_project" { + provider = google-beta + name = "tf-test-appeng-flex%{random_suffix}" + project_id = "tf-test-appeng-flex%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + deletion_policy = "DELETE" +} + +resource "google_project_service" "compute" { + provider = google-beta + project = google_project.my_project.project_id + service = "compute.googleapis.com" + + disable_dependent_services = false +} + +resource "google_project_service" "appengineflex" { + provider = google-beta + project = google_project.my_project.project_id + service = "appengineflex.googleapis.com" + + disable_dependent_services = false + depends_on = [google_project_service.compute] +} + +resource "google_compute_network" "network" { + provider = google-beta + 
project = google_project_service.compute.project + name = "custom" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "subnetwork" { + provider = google-beta + project = google_project_service.compute.project + name = "custom" + region = "us-central1" + network = google_compute_network.network.id + ip_cidr_range = "10.0.0.0/16" + private_ip_google_access = true +} + +resource "google_app_engine_application" "app" { + provider = google-beta + project = google_project.my_project.project_id + location_id = "us-central" +} + +resource "google_project_iam_member" "gae_api" { + provider = google-beta + project = google_project_service.appengineflex.project + role = "roles/compute.networkUser" + member = "serviceAccount:service-${google_project.my_project.number}@gae-api-prod.google.com.iam.gserviceaccount.com" +} + +resource "google_app_engine_standard_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "default" + runtime = "python38" + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + } + + env_variables = { + port = "8000" + } + + noop_on_destroy = true +} + +resource "google_app_engine_flexible_app_version" "foo" { + provider = google-beta + project = google_project_iam_member.gae_api.project + version_id = "v1" + service = "custom" + runtime = "python" + + runtime_api_version = "1" + + resources { + cpu = 1 + memory_gb = 1 + disk_gb = 10 + } + + entrypoint { + shell = "gunicorn -b :$PORT main:app" + } + + flexible_runtime_settings { + operating_system = "ubuntu22" + runtime_version = "3.11" + } + + 
deployment { + files { + name = "main.py" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.main.name}" + } + + files { + name = "requirements.txt" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.requirements.name}" + } + + files { + name = "app.yaml" + source_url = "https://storage.googleapis.com/${google_storage_bucket.bucket.name}/${google_storage_bucket_object.yaml.name}" + } + } + + liveness_check { + path = "" + } + + readiness_check { + path = "" + } + + env_variables = { + port = "8000" + } + + network { + name = google_compute_network.network.name + subnetwork = google_compute_subnetwork.subnetwork.name + instance_ip_mode = "INTERNAL" + } + + instance_class = "B2" + + manual_scaling { + instances = 2 + } + + delete_service_on_destroy = true + + depends_on = [google_app_engine_standard_app_version.foo] +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + project = google_project.my_project.project_id + name = "tf-test-%{random_suffix}-flex-ae-bucket" + location = "US" +} + +resource "google_storage_bucket_object" "yaml" { + provider = google-beta + name = "app.yaml" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/app.yaml" +} + +resource "google_storage_bucket_object" "requirements" { + provider = google-beta + name = "requirements.txt" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/requirements.txt" +} + +resource "google_storage_bucket_object" "main" { + provider = google-beta + name = "main.py" + bucket = google_storage_bucket.bucket.name + source = "./test-fixtures/hello-world-flask/main.py" +}`, context) +} + +// Remove when generated test is enabled +func testAccCheckAppEngineFlexibleAppVersionDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs 
:= range s.RootModule().Resources { + if rs.Type != "google_app_engine_flexible_app_version" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + log.Printf("[DEBUG] Ignoring destroy during test") + } + + return nil + } +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl b/mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl new file mode 100644 index 000000000000..d9d95562a523 --- /dev/null +++ b/mmv1/third_party/terraform/services/backupdr/go/resource_backup_dr_backup_vault_test.go.tmpl @@ -0,0 +1,97 @@ +package backupdr_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" + "time" +) + +func TestAccBackupDRBackupVault_fullUpdate(t *testing.T) { + t.Parallel() + + timeNow := time.Now().UTC() + referenceTime := time.Date(timeNow.Year(), timeNow.Month(), timeNow.Day(), 0, 0, 0, 0, time.UTC) + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "effective_time": referenceTime.Add(24 * time.Hour).Format(time.RFC3339), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccBackupDRBackupVault_fullCreate(context), + }, + { + ResourceName: "google_backup_dr_backup_vault.backup-vault-test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"allow_missing", "annotations", "backup_vault_id", "force_delete", "force_update", "labels", "location", "terraform_labels"}, + }, + { + Config: testAccBackupDRBackupVault_fullUpdate(context), + }, + { + ResourceName: 
"google_backup_dr_backup_vault.backup-vault-test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"allow_missing", "annotations", "backup_vault_id", "force_delete", "force_update", "labels", "location", "terraform_labels"}, + }, + }, + }) +} + +func testAccBackupDRBackupVault_fullCreate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_backup_dr_backup_vault" "backup-vault-test" { + provider = google-beta + location = "us-central1" + backup_vault_id = "tf-test-backup-vault-test%{random_suffix}" + description = "This is a backup vault built by Terraform." + backup_minimum_enforced_retention_duration = "100000s" + effective_time = "%{effective_time}" + labels = { + foo = "bar" + bar = "baz" + } + annotations = { + annotations1 = "bar" + annotations2 = "baz" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} +`, context) +} + +func testAccBackupDRBackupVault_fullUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_backup_dr_backup_vault" "backup-vault-test" { + provider = google-beta + location = "us-central1" + backup_vault_id = "tf-test-backup-vault-test%{random_suffix}" + description = "This is a second backup vault built by Terraform." 
+ backup_minimum_enforced_retention_duration = "200000s" + effective_time = "%{effective_time}" + labels = { + foo = "bar1" + bar = "baz1" + } + annotations = { + annotations1 = "bar1" + annotations2 = "baz1" + } + force_update = "true" + force_delete = "true" + allow_missing = "true" +} +`, context) +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl deleted file mode 100644 index 880e625ff419..000000000000 --- a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 -package bigquery_test - -import ( - "fmt" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/envvar" - "google.golang.org/api/bigquery/v2" -) - -func TestAccBigQueryDataset_basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_withoutLabels(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccBigQueryDataset(datasetID), - 
Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - // The labels field in the state is decided by the configuration. - // During importing, the configuration is unavailable, so the labels field in the state after importing is empty. - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "bar"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "7200000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "bar"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "7200000"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated2(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", 
- ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDataset_withoutLabels(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccBigQueryDataset_withComputedLabels(t *testing.T) { - // Skip it in VCR test because of the randomness of uuid in "labels" field - // which causes the replaying mode after recording mode failing in VCR test - acctest.SkipIfVcr(t) - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - ExternalProviders: map[string]resource.ExternalProvider{ - "random": {}, - }, - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), - ), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - // The labels 
field in the state is decided by the configuration. - // During importing, the configuration is unavailable, so the labels field in the state after importing is empty. - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated_withComputedLabels(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_withProvider5(t *testing.T) { - acctest.SkipIfVcr(t) - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - oldVersion := map[string]resource.ExternalProvider{ - "google": { - VersionConstraint: "4.75.0", // a version that doesn't separate user defined labels and system labels - Source: "registry.terraform.io/hashicorp/google", - }, - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_withoutLabels(datasetID), - ExternalProviders: oldVersion, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), - resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), - ), - }, - { - Config: testAccBigQueryDataset(datasetID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), - - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), - 
resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), - resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), - ), - }, - }, - }) -} - -func TestAccBigQueryDataset_withOutOfBandLabels(t *testing.T) { - acctest.SkipIfVcr(t) - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - Check: addOutOfBandLabels(t, datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetUpdated_withOutOfBandLabels(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_datasetWithContents(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { 
- Config: testAccBigQueryDatasetDeleteContents(datasetID), - Check: testAccAddTable(t, datasetID, tableID), - }, - { - ResourceName: "google_bigquery_dataset.contents_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_access(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_access_%s", acctest.RandString(t, 10)) - otherDatasetID := fmt.Sprintf("tf_test_other_%s", acctest.RandString(t, 10)) - otherTableID := fmt.Sprintf("tf_test_other_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDatasetWithOneAccess(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetWithThreeAccess(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetWithOneAccess(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDatasetWithViewAccess(datasetID, otherDatasetID, otherTableID), - }, - { - ResourceName: "google_bigquery_dataset.access_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_regionalLocation(t *testing.T) { - t.Parallel() 
- - datasetID1 := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryRegionalDataset(datasetID1, "asia-south1"), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_cmek(t *testing.T) { - t.Parallel() - - kms := acctest.BootstrapKMSKeyInLocation(t, "us") - pid := envvar.GetTestProjectFromEnv() - datasetID1 := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_cmek(pid, datasetID1, kms.CryptoKey.Name), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccBigQueryDataset_storageBillModel(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDatasetStorageBillingModel(datasetID), - }, - { - ResourceName: "google_bigquery_dataset.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataset_invalidCharacterInID(t *testing.T) { - t.Parallel() - // Not an acceptance test. 
- acctest.SkipIfVcr(t) - - datasetID := fmt.Sprintf("tf_test_%s-with-hyphens", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - ExpectError: regexp.MustCompile("must contain only letters.+numbers.+or underscores.+"), - }, - }, - }) -} - -func TestAccBigQueryDataset_invalidLongID(t *testing.T) { - t.Parallel() - // Not an acceptance test. - acctest.SkipIfVcr(t) - - datasetSuffix := acctest.RandString(t, 10) - datasetID := fmt.Sprintf("tf_test_%s", strings.Repeat(datasetSuffix, 200)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset(datasetID), - ExpectError: regexp.MustCompile(".+cannot be greater than 1,024 characters"), - }, - }, - }) -} - -{{ if ne $.TargetVersionName `ga` -}} -func TestAccBigQueryDataset_bigqueryDatasetResourceTags_update(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context), - }, - { - ResourceName: "google_bigquery_dataset.dataset", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context), - }, - { - 
ResourceName: "google_bigquery_dataset.dataset", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, - }, - }, - }) -} - -{{ end }} -func testAccAddTable(t *testing.T, datasetID string, tableID string) resource.TestCheckFunc { - // Not actually a check, but adds a table independently of terraform - return func(s *terraform.State) error { - config := acctest.GoogleProviderConfig(t) - table := &bigquery.Table{ - TableReference: &bigquery.TableReference{ - DatasetId: datasetID, - TableId: tableID, - ProjectId: config.Project, - }, - } - _, err := config.NewBigQueryClient(config.UserAgent).Tables.Insert(config.Project, datasetID, table).Do() - if err != nil { - return fmt.Errorf("Could not create table") - } - return nil - } -} - -func addOutOfBandLabels(t *testing.T, datasetID string) resource.TestCheckFunc { - // Not actually a check, but adds labels independently of terraform - return func(s *terraform.State) error { - config := acctest.GoogleProviderConfig(t) - - dataset, err := config.NewBigQueryClient(config.UserAgent).Datasets.Get(config.Project, datasetID).Do() - if err != nil { - return fmt.Errorf("Could not get dataset with ID %s", datasetID) - } - - dataset.Labels["outband_key"] = "test" - _, err = config.NewBigQueryClient(config.UserAgent).Datasets.Patch(config.Project, datasetID, dataset).Do() - if err != nil { - return fmt.Errorf("Could not update labele for the dataset") - } - return nil - } -} - -func testAccBigQueryDataset_withoutLabels(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - default_table_expiration_ms = 3600000 -} -`, datasetID) -} - -func testAccBigQueryDataset(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - 
friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - default_table_expiration_ms = 3600000 - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "bar" - default_table_expiration_ms = 7200000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated_withOutOfBandLabels(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "bar" - default_table_expiration_ms = 7200000 - outband_key = "test-update" - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated2(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - # friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "bar" - default_table_expiration_ms = 7200000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetUpdated_withComputedLabels(datasetID string) string { - return fmt.Sprintf(` -resource "random_uuid" "test" { -} - -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - # friendly_name = "bar" - description = "This is a bar description" - location = "EU" - default_partition_expiration_ms = 7200000 - default_table_expiration_ms = 7200000 - - labels = { - env = "${random_uuid.test.result}" 
- default_table_expiration_ms = 7200000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetDeleteContents(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "contents_test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - default_table_expiration_ms = 3600000 - delete_contents_on_destroy = true - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryRegionalDataset(datasetID string, location string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "%s" - default_table_expiration_ms = 3600000 - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID, location) -} - -func testAccBigQueryDatasetWithOneAccess(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "access_test" { - dataset_id = "%s" - - access { - role = "OWNER" - user_by_email = "Joe@example.com" - } - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetWithThreeAccess(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "access_test" { - dataset_id = "%s" - - access { - role = "OWNER" - user_by_email = "Joe@example.com" - } - access { - role = "READER" - domain = "hashicorp.com" - } - access { - role = "READER" - iam_member = "allUsers" - } - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} - -func testAccBigQueryDatasetWithViewAccess(datasetID, otherDatasetID, otherTableID string) string { - // Note that we have to add a non-view access to prevent BQ from creating 4 default - // access entries. 
- return fmt.Sprintf(` -resource "google_bigquery_dataset" "other_dataset" { - dataset_id = "%s" -} - -resource "google_bigquery_table" "table_with_view" { - deletion_protection = false - table_id = "%s" - dataset_id = google_bigquery_dataset.other_dataset.dataset_id - - time_partitioning { - type = "DAY" - } - - view { - query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]" - use_legacy_sql = true - } -} - -resource "google_bigquery_dataset" "access_test" { - dataset_id = "%s" - - access { - role = "OWNER" - user_by_email = "Joe@example.com" - } - access { - view { - project_id = google_bigquery_dataset.other_dataset.project - dataset_id = google_bigquery_dataset.other_dataset.dataset_id - table_id = google_bigquery_table.table_with_view.table_id - } - } - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, otherDatasetID, otherTableID, datasetID) -} - -func testAccBigQueryDataset_cmek(pid, datasetID, kmsKey string) string { - return fmt.Sprintf(` -data "google_project" "project" { - project_id = "%s" -} - -resource "google_kms_crypto_key_iam_member" "kms-member" { - crypto_key_id = "%s" - role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:bq-${data.google_project.project.number}@bigquery-encryption.iam.gserviceaccount.com" -} - -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "test" - description = "This is a test description" - location = "US" - default_table_expiration_ms = 3600000 - - default_encryption_configuration { - kms_key_name = "%s" - } - - depends_on = [google_kms_crypto_key_iam_member.kms-member] -} -`, pid, kmsKey, datasetID, kmsKey) -} - -func testAccBigQueryDatasetStorageBillingModel(datasetID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" - friendly_name = "foo" - description = "This is a foo description" - location = "EU" - default_partition_expiration_ms = 3600000 - 
default_table_expiration_ms = 3600000 - storage_billing_model = "PHYSICAL" - - labels = { - env = "foo" - default_table_expiration_ms = 3600000 - } -} -`, datasetID) -} -{{- if ne $.TargetVersionName "ga" }} - -func testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context map[string]interface{}) string { - return acctest.Nprintf(` -data "google_project" "project" { - provider = "google-beta" -} - -resource "google_tags_tag_key" "tag_key1" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key1%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value1" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" - short_name = "tf_test_tag_value1%{random_suffix}" -} - -resource "google_tags_tag_key" "tag_key2" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key2%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value2" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" - short_name = "tf_test_tag_value2%{random_suffix}" -} - -resource "google_bigquery_dataset" "dataset" { - provider = google-beta - - dataset_id = "dataset%{random_suffix}" - friendly_name = "test" - description = "This is a test description" - location = "EU" - - resource_tags = { - "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key1.short_name}" = "${google_tags_tag_value.tag_value1.short_name}" - "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key2.short_name}" = "${google_tags_tag_value.tag_value2.short_name}" - } -} -`, context) -} - -func testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context map[string]interface{}) string { - return acctest.Nprintf(` -data "google_project" "project" { - provider = "google-beta" -} - -resource "google_tags_tag_key" "tag_key1" { - provider = google-beta - parent = 
"projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key1%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value1" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" - short_name = "tf_test_tag_value1%{random_suffix}" -} - -resource "google_tags_tag_key" "tag_key2" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key2%{random_suffix}" -} - -resource "google_tags_tag_value" "tag_value2" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" - short_name = "tf_test_tag_value2%{random_suffix}" -} - -resource "google_bigquery_dataset" "dataset" { - provider = google-beta - - dataset_id = "dataset%{random_suffix}" - friendly_name = "test" - description = "This is a test description" - location = "EU" - - resource_tags = { - } -} -`, context) -} -{{- end }} diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl deleted file mode 100644 index b1b80240c433..000000000000 --- a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl +++ /dev/null @@ -1,2961 +0,0 @@ -package bigquery - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "google.golang.org/api/bigquery/v2" -) - -func bigQueryTableSortArrayByName(array []interface{}) { - sort.Slice(array, func(i, k int) bool { - return 
array[i].(map[string]interface{})["name"].(string) < array[k].(map[string]interface{})["name"].(string) - }) -} - -func bigQueryArrayToMapIndexedByName(array []interface{}) map[string]interface{} { - out := map[string]interface{}{} - for _, v := range array { - name := v.(map[string]interface{})["name"].(string) - out[name] = v - } - return out -} - -func bigQueryTablecheckNameExists(jsonList []interface{}) error { - for _, m := range jsonList { - if _, ok := m.(map[string]interface{})["name"]; !ok { - return fmt.Errorf("No name in schema %+v", m) - } - } - - return nil -} - -// Compares two json's while optionally taking in a compareMapKeyVal function. -// This function will override any comparison of a given map[string]interface{} -// on a specific key value allowing for a separate equality in specific scenarios -func jsonCompareWithMapKeyOverride(key string, a, b interface{}, compareMapKeyVal func(key string, val1, val2 map[string]interface{}) bool) (bool, error) { - switch a.(type) { - case []interface{}: - arrayA := a.([]interface{}) - arrayB, ok := b.([]interface{}) - if !ok { - return false, nil - } else if len(arrayA) != len(arrayB) { - return false, nil - } - - // Sort fields by name so reordering them doesn't cause a diff. 
- if key == "schema" || key == "fields" { - if err := bigQueryTablecheckNameExists(arrayA); err != nil { - return false, err - } - bigQueryTableSortArrayByName(arrayA) - if err := bigQueryTablecheckNameExists(arrayB); err != nil { - return false, err - } - bigQueryTableSortArrayByName(arrayB) - } - for i := range arrayA { - eq, err := jsonCompareWithMapKeyOverride(strconv.Itoa(i), arrayA[i], arrayB[i], compareMapKeyVal) - if err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil - case map[string]interface{}: - objectA := a.(map[string]interface{}) - objectB, ok := b.(map[string]interface{}) - if !ok { - return false, nil - } - - var unionOfKeys map[string]bool = make(map[string]bool) - for subKey := range objectA { - unionOfKeys[subKey] = true - } - for subKey := range objectB { - unionOfKeys[subKey] = true - } - - for subKey := range unionOfKeys { - eq := compareMapKeyVal(subKey, objectA, objectB) - if !eq { - valA, ok1 := objectA[subKey] - valB, ok2 := objectB[subKey] - if !ok1 || !ok2 { - return false, nil - } - eq, err := jsonCompareWithMapKeyOverride(subKey, valA, valB, compareMapKeyVal) - if err != nil || !eq { - return false, err - } - } - } - return true, nil - case string, float64, bool, nil: - return a == b, nil - default: - log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... 
please ensure you are passing a json object from json.Unmarshall") - return false, errors.New("unable to compare values") - } -} - -// checks if the value is within the array, only works for generics -// because objects and arrays will take the reference comparison -func valueIsInArray(value interface{}, array []interface{}) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -func bigQueryTableMapKeyOverride(key string, objectA, objectB map[string]interface{}) bool { - // we rely on the fallback to nil if the object does not have the key - valA := objectA[key] - valB := objectB[key] - switch key { - case "mode": - eq := bigQueryTableNormalizeMode(valA) == bigQueryTableNormalizeMode(valB) - return eq - case "description": - equivalentSet := []interface{}{nil, ""} - eq := valueIsInArray(valA, equivalentSet) && valueIsInArray(valB, equivalentSet) - return eq - case "type": - if valA == nil || valB == nil { - return false - } - return bigQueryTableTypeEq(valA.(string), valB.(string)) - case "policyTags": - eq := bigQueryTableNormalizePolicyTags(valA) == nil && bigQueryTableNormalizePolicyTags(valB) == nil - return eq - } - - // otherwise rely on default behavior - return false -} - -// Compare the JSON strings are equal -func bigQueryTableSchemaDiffSuppress(name, old, new string, _ *schema.ResourceData) bool { - // The API can return an empty schema which gets encoded to "null" during read. 
- if old == "null" { - old = "[]" - } - var a, b interface{} - if err := json.Unmarshal([]byte(old), &a); err != nil { - log.Printf("[DEBUG] unable to unmarshal old json - %v", err) - } - if err := json.Unmarshal([]byte(new), &b); err != nil { - log.Printf("[DEBUG] unable to unmarshal new json - %v", err) - } - - eq, err := jsonCompareWithMapKeyOverride(name, a, b, bigQueryTableMapKeyOverride) - if err != nil { - log.Printf("[DEBUG] %v", err) - log.Printf("[DEBUG] Error comparing JSON: %v, %v", old, new) - } - - return eq -} - -func bigQueryTableConnectionIdSuppress(name, old, new string, _ *schema.ResourceData) bool { - // API accepts connectionId in below two formats - // "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or - // "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}". - // but always returns "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" - - if tpgresource.IsEmptyValue(reflect.ValueOf(old)) || tpgresource.IsEmptyValue(reflect.ValueOf(new)) { - return false - } - - // Old is in the dot format, and new is in the slash format. - // They represent the same connection if the project, locaition, and IDs are - // the same. - // Location should use a case-insenstive comparison. - dotRe := regexp.MustCompile(`(.+)\.(.+)\.(.+)`) - slashRe := regexp.MustCompile("projects/(.+)/(?:locations|regions)/(.+)/connections/(.+)") - dotMatches := dotRe.FindStringSubmatch(old) - slashMatches := slashRe.FindStringSubmatch(new) - if dotMatches != nil && slashMatches != nil { - sameProject := dotMatches[1] == slashMatches[1] - sameLocation := strings.EqualFold(dotMatches[2], slashMatches[2]) - sameId := dotMatches[3] == slashMatches[3] - return sameProject && sameLocation && sameId - } - - return false -} - -func bigQueryTableTypeEq(old, new string) bool { - // Do case-insensitive comparison. 
https://github.com/hashicorp/terraform-provider-google/issues/9472 - oldUpper := strings.ToUpper(old) - newUpper := strings.ToUpper(new) - - equivalentSet1 := []interface{}{"INTEGER", "INT64"} - equivalentSet2 := []interface{}{"FLOAT", "FLOAT64"} - equivalentSet3 := []interface{}{"BOOLEAN", "BOOL"} - eq0 := oldUpper == newUpper - eq1 := valueIsInArray(oldUpper, equivalentSet1) && valueIsInArray(newUpper, equivalentSet1) - eq2 := valueIsInArray(oldUpper, equivalentSet2) && valueIsInArray(newUpper, equivalentSet2) - eq3 := valueIsInArray(oldUpper, equivalentSet3) && valueIsInArray(newUpper, equivalentSet3) - eq := eq0 || eq1 || eq2 || eq3 - return eq -} - -func bigQueryTableNormalizeMode(mode interface{}) string { - if mode == nil { - return "NULLABLE" - } - // Upper-case to get case-insensitive comparisons. https://github.com/hashicorp/terraform-provider-google/issues/9472 - return strings.ToUpper(mode.(string)) -} - -func bigQueryTableModeIsForceNew(old, new string) bool { - eq := old == new - reqToNull := old == "REQUIRED" && new == "NULLABLE" - return !eq && !reqToNull -} - -func bigQueryTableNormalizePolicyTags(val interface{}) interface{} { - if val == nil { - return nil - } - if policyTags, ok := val.(map[string]interface{}); ok { - // policyTags = {} is same as nil. - if len(policyTags) == 0 { - return nil - } - // policyTags = {names = []} is same as nil. - if names, ok := policyTags["names"].([]interface{}); ok && len(names) == 0 { - return nil - } - } - return val -} - -// Compares two existing schema implementations and decides if -// it is changeable.. 
pairs with a force new on not changeable -func resourceBigQueryTableSchemaIsChangeable(old, new interface{}, isExternalTable bool, topLevel bool) (bool, error) { - switch old.(type) { - case []interface{}: - arrayOld := old.([]interface{}) - arrayNew, ok := new.([]interface{}) - sameNameColumns := 0 - droppedColumns := 0 - if !ok { - // if not both arrays not changeable - return false, nil - } - if err := bigQueryTablecheckNameExists(arrayOld); err != nil { - return false, err - } - mapOld := bigQueryArrayToMapIndexedByName(arrayOld) - if err := bigQueryTablecheckNameExists(arrayNew); err != nil { - return false, err - } - mapNew := bigQueryArrayToMapIndexedByName(arrayNew) - for key := range mapNew { - // making unchangeable if an newly added column is with REQUIRED mode - if _, ok := mapOld[key]; !ok { - items := mapNew[key].(map[string]interface{}) - for k := range items { - if k == "mode" && fmt.Sprintf("%v", items[k]) == "REQUIRED" { - return false, nil - } - } - } - } - for key := range mapOld { - // dropping top level columns can happen in-place - // but this doesn't apply to external tables - if _, ok := mapNew[key]; !ok { - if !topLevel || isExternalTable { - return false, nil - } - droppedColumns += 1 - continue - } - - isChangable, err := resourceBigQueryTableSchemaIsChangeable(mapOld[key], mapNew[key], isExternalTable, false) - if err != nil || !isChangable { - return false, err - } else if isChangable && topLevel { - // top level column that exists in the new schema - sameNameColumns += 1 - } - } - // in-place column dropping alongside column additions is not allowed - // as of now because user intention can be ambiguous (e.g. 
column renaming) - newColumns := len(arrayNew) - sameNameColumns - return (droppedColumns == 0) || (newColumns == 0), nil - case map[string]interface{}: - objectOld := old.(map[string]interface{}) - objectNew, ok := new.(map[string]interface{}) - if !ok { - // if both aren't objects - return false, nil - } - var unionOfKeys map[string]bool = make(map[string]bool) - for key := range objectOld { - unionOfKeys[key] = true - } - for key := range objectNew { - unionOfKeys[key] = true - } - for key := range unionOfKeys { - valOld := objectOld[key] - valNew := objectNew[key] - switch key { - case "name": - if valOld != valNew { - return false, nil - } - case "type": - if valOld == nil || valNew == nil { - // This is invalid, so it shouldn't require a ForceNew - return true, nil - } - if !bigQueryTableTypeEq(valOld.(string), valNew.(string)) { - return false, nil - } - case "mode": - if bigQueryTableModeIsForceNew( - bigQueryTableNormalizeMode(valOld), - bigQueryTableNormalizeMode(valNew), - ) { - return false, nil - } - case "fields": - return resourceBigQueryTableSchemaIsChangeable(valOld, valNew, isExternalTable, false) - - // other parameters: description, policyTags and - // policyTags.names[] are changeable - } - } - return true, nil - case string, float64, bool, nil: - // realistically this shouldn't hit - log.Printf("[DEBUG] comparison of generics hit... not expected") - return old == new, nil - default: - log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... 
please ensure you are passing a json object from json.Unmarshall") - return false, errors.New("unable to compare values") - } -} - -func resourceBigQueryTableSchemaCustomizeDiffFunc(d tpgresource.TerraformResourceDiff) error { - if _, hasSchema := d.GetOk("schema"); hasSchema { - oldSchema, newSchema := d.GetChange("schema") - oldSchemaText := oldSchema.(string) - newSchemaText := newSchema.(string) - if oldSchemaText == "null" { - // The API can return an empty schema which gets encoded to "null" during read. - oldSchemaText = "[]" - } - if newSchemaText == "null" { - newSchemaText = "[]" - } - var old, new interface{} - if err := json.Unmarshal([]byte(oldSchemaText), &old); err != nil { - // don't return error, its possible we are going from no schema to schema - // this case will be cover on the conparision regardless. - log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err) - } - if err := json.Unmarshal([]byte(newSchemaText), &new); err != nil { - // same as above - log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err) - } - _, isExternalTable := d.GetOk("external_data_configuration") - isChangeable, err := resourceBigQueryTableSchemaIsChangeable(old, new, isExternalTable, true) - if err != nil { - return err - } - if !isChangeable { - if err := d.ForceNew("schema"); err != nil { - return err - } - } - return nil - } - return nil -} - -func resourceBigQueryTableSchemaCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { - return resourceBigQueryTableSchemaCustomizeDiffFunc(d) -} - -func validateBigQueryTableSchema(v interface{}, k string) (warnings []string, errs []error) { - if v == nil { - return - } - - if _, e := validation.StringIsJSON(v, k); e != nil { - errs = append(errs, e...) 
- return - } - - var jsonList []interface{} - if err := json.Unmarshal([]byte(v.(string)), &jsonList); err != nil { - errs = append(errs, fmt.Errorf("\"schema\" is not a JSON array: %s", err)) - return - } - - for _, v := range jsonList { - if v == nil { - errs = append(errs, errors.New("\"schema\" contains a nil element")) - return - } - } - - return -} - -func ResourceBigQueryTable() *schema.Resource { - return &schema.Resource{ - Create: resourceBigQueryTableCreate, - Read: resourceBigQueryTableRead, - Delete: resourceBigQueryTableDelete, - Update: resourceBigQueryTableUpdate, - Importer: &schema.ResourceImporter{ - State: resourceBigQueryTableImport, - }, - CustomizeDiff: customdiff.All( - tpgresource.DefaultProviderProject, - resourceBigQueryTableSchemaCustomizeDiff, - tpgresource.SetLabelsDiff, - ), - Schema: map[string]*schema.Schema{ - // TableId: [Required] The ID of the table. The ID must contain only - // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum - // length is 1,024 characters. - "table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique ID for the resource. Changing this forces a new resource to be created.`, - }, - - // DatasetId: [Required] The ID of the dataset containing this table. - "dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The dataset ID to create the table in. Changing this forces a new resource to be created.`, - }, - - // ProjectId: [Required] The ID of the project containing this table. - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs.`, - }, - - // Description: [Optional] A user-friendly description of this table. 
- "description": { - Type: schema.TypeString, - Optional: true, - Description: `The field description.`, - }, - - // ExpirationTime: [Optional] The time when this table expires, in - // milliseconds since the epoch. If not present, the table will persist - // indefinitely. Expired tables will be deleted and their storage - // reclaimed. - "expiration_time": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: `The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.`, - }, - - // ExternalDataConfiguration [Optional] Describes the data format, - // location, and other properties of a table stored outside of BigQuery. - // By defining these properties, the data source can then be queried as - // if it were a standard BigQuery table. - "external_data_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Autodetect : [Required] If true, let BigQuery try to autodetect the - // schema and format of the table. - "autodetect": { - Type: schema.TypeBool, - Required: true, - Description: `Let BigQuery try to autodetect the schema and format of the table.`, - }, - // SourceFormat [Required] The data format. - "source_format": { - Type: schema.TypeString, - Optional: true, - Description: `Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. 
To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, - ValidateFunc: validation.StringInSlice([]string{ - "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "ICEBERG", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", - }, false), - }, - // SourceURIs [Required] The fully-qualified URIs that point to your data in Google Cloud. - "source_uris": { - Type: schema.TypeList, - Required: true, - Description: `A list of the fully-qualified URIs that point to your data in Google Cloud.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - // FileSetSpecType: [Optional] Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. - "file_set_spec_type": { - Type: schema.TypeString, - Optional: true, - Description: `Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems.`, - }, - // Compression: [Optional] The compression type of the data source. - "compression": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"NONE", "GZIP"}, false), - Default: "NONE", - Description: `The compression type of the data source. Valid values are "NONE" or "GZIP".`, - }, - // Schema: [Optional] The schema for the data. - // Schema is required for CSV and JSON formats if autodetect is not on. - // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. 
- "schema": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateBigQueryTableSchema, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - Description: `A JSON schema for the external table. Schema is required for CSV and JSON formats and is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats when using external tables.`, - }, - // CsvOptions: [Optional] Additional properties to set if - // sourceFormat is set to CSV. - "csv_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional properties to set if source_format is set to "CSV".`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Quote: [Required] The value that is used to quote data - // sections in a CSV file. - "quote": { - Type: schema.TypeString, - Required: true, - Description: `The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in Terraform escaped as \". Due to limitations with Terraform default values, this value is required to be explicitly set.`, - }, - // AllowJaggedRows: [Optional] Indicates if BigQuery should - // accept rows that are missing trailing optional columns. - "allow_jagged_rows": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Indicates if BigQuery should accept rows that are missing trailing optional columns.`, - }, - // AllowQuotedNewlines: [Optional] Indicates if BigQuery - // should allow quoted data sections that contain newline - // characters in a CSV file. The default value is false. 
- "allow_quoted_newlines": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.`, - }, - // Encoding: [Optional] The character encoding of the data. - // The supported values are UTF-8 or ISO-8859-1. - "encoding": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"ISO-8859-1", "UTF-8"}, false), - Default: "UTF-8", - Description: `The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.`, - }, - // FieldDelimiter: [Optional] The separator for fields in a CSV file. - "field_delimiter": { - Type: schema.TypeString, - Optional: true, - Default: ",", - Description: `The separator for fields in a CSV file.`, - }, - // SkipLeadingRows: [Optional] The number of rows at the top - // of a CSV file that BigQuery will skip when reading the data. - "skip_leading_rows": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - Description: `The number of rows at the top of a CSV file that BigQuery will skip when reading the data.`, - }, - }, - }, - }, - // jsonOptions: [Optional] Additional properties to set if sourceFormat is set to JSON. - "json_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional properties to set if sourceFormat is set to JSON.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "encoding": { - Type: schema.TypeString, - Optional: true, - Default: "UTF-8", - ValidateFunc: validation.StringInSlice([]string{"UTF-8", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE"}, false), - Description: `The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. 
The default value is UTF-8.`, - }, - }, - }, - }, - - "json_extension": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"GEOJSON"}, false), - Description: `Load option to be used together with sourceFormat newline-delimited JSON to indicate that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and sourceFormat must be set to NEWLINE_DELIMITED_JSON).`, - }, - - "bigtable_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional options if sourceFormat is set to BIGTABLE.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "column_family": { - Type: schema.TypeList, - Optional: true, - Description: `A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "column": { - Type: schema.TypeList, - Optional: true, - Description: `A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "qualifier_encoded": { - Type: schema.TypeString, - Optional: true, - Description: `Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. 
Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.`, - }, - "qualifier_string": { - Type: schema.TypeString, - Optional: true, - Description: `Qualifier string.`, - }, - "field_name": { - Type: schema.TypeString, - Optional: true, - Description: `If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: `The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.`, - }, - "encoding": { - Type: schema.TypeString, - Optional: true, - Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.`, - }, - "only_read_latest": { - Type: schema.TypeBool, - Optional: true, - Description: `If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. 
However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.`, - }, - }, - }, - }, - "family_id": { - Type: schema.TypeString, - Optional: true, - Description: `Identifier of the column family.`, - }, - "type": { - Type: schema.TypeString, - Optional: true, - Description: `The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.`, - }, - "encoding": { - Type: schema.TypeString, - Optional: true, - Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.`, - }, - "only_read_latest": { - Type: schema.TypeBool, - Optional: true, - Description: `If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.`, - }, - }, - }, - }, - "ignore_unspecified_column_families": { - Type: schema.TypeBool, - Optional: true, - Description: `If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.`, - }, - "read_rowkey_as_string": { - Type: schema.TypeBool, - Optional: true, - Description: `If field is true, then the rowkey column families will be read and converted to string. 
Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.`, - }, - "output_column_families_as_json": { - Type: schema.TypeBool, - Optional: true, - Description: `If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.`, - }, - }, - }, - }, - - "parquet_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional properties to set if sourceFormat is set to PARQUET.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enum_as_string": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.`, - }, - "enable_list_inference": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether to use schema inference specifically for Parquet LIST logical type.`, - }, - }, - }, - }, - // GoogleSheetsOptions: [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS. - "google_sheets_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional options if source_format is set to "GOOGLE_SHEETS".`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Range: [Optional] Range of a sheet to query from. Only used when non-empty. - // Typical format: !: - "range": { - Type: schema.TypeString, - Optional: true, - Description: `Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. 
Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20`, - AtLeastOneOf: []string{ - "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", - "external_data_configuration.0.google_sheets_options.0.range", - }, - }, - // SkipLeadingRows: [Optional] The number of rows at the top - // of the sheet that BigQuery will skip when reading the data. - "skip_leading_rows": { - Type: schema.TypeInt, - Optional: true, - Description: `The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.`, - AtLeastOneOf: []string{ - "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", - "external_data_configuration.0.google_sheets_options.0.range", - }, - }, - }, - }, - }, - - // HivePartitioningOptions:: [Optional] Options for configuring hive partitioning detect. - "hive_partitioning_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Mode: [Optional] [Experimental] When set, what mode of hive partitioning to use when reading data. - // Two modes are supported. - //* AUTO: automatically infer partition key name(s) and type(s). - //* STRINGS: automatically infer partition key name(s). - "mode": { - Type: schema.TypeString, - Optional: true, - Description: `When set, what mode of hive partitioning to use when reading data.`, - }, - // RequirePartitionFilter: [Optional] If set to true, queries over this table - // require a partition filter that can be used for partition elimination to be - // specified. 
- "require_partition_filter": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - }, - // SourceUriPrefix: [Optional] [Experimental] When hive partition detection is requested, a common for all source uris must be required. - // The prefix must end immediately before the partition key encoding begins. - "source_uri_prefix": { - Type: schema.TypeString, - Optional: true, - Description: `When hive partition detection is requested, a common for all source uris must be required. The prefix must end immediately before the partition key encoding begins.`, - }, - }, - }, - }, - // AvroOptions: [Optional] Additional options if sourceFormat is set to AVRO. - "avro_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional options if source_format is set to "AVRO"`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "use_avro_logical_types": { - Type: schema.TypeBool, - Required: true, - Description: `If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).`, - }, - }, - }, - }, - - // IgnoreUnknownValues: [Optional] Indicates if BigQuery should - // allow extra values that are not represented in the table schema. - // If true, the extra values are ignored. If false, records with - // extra columns are treated as bad records, and if there are too - // many bad records, an invalid error is returned in the job result. - // The default value is false. - "ignore_unknown_values": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. 
If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.`, - }, - // MaxBadRecords: [Optional] The maximum number of bad records that - // BigQuery can ignore when reading data. - "max_bad_records": { - Type: schema.TypeInt, - Optional: true, - Description: `The maximum number of bad records that BigQuery can ignore when reading data.`, - }, - // ConnectionId: [Optional] The connection specifying the credentials - // to be used to read external storage, such as Azure Blob, - // Cloud Storage, or S3. The connectionId can have the form - // "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or - // "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}". - "connection_id": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: bigQueryTableConnectionIdSuppress, - Description: `The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connectionId can have the form "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}".`, - }, - "reference_file_schema_uri": { - Type: schema.TypeString, - Optional: true, - Description: `When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.`, - }, - "metadata_cache_mode": { - Type: schema.TypeString, - Optional: true, - Description: `Metadata Cache Mode for the table. 
Set this to enable caching of metadata from external data source.`, - ValidateFunc: validation.StringInSlice([]string{"AUTOMATIC", "MANUAL"}, false), - }, - "object_metadata": { - Type: schema.TypeString, - Optional: true, - Description: `Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If ObjectMetadata is set, sourceFormat should be omitted.`, - ConflictsWith: []string{"external_data_configuration.0.source_format"}, - }, - }, - }, - }, - - // FriendlyName: [Optional] A descriptive name for this table. - "friendly_name": { - Type: schema.TypeString, - Optional: true, - Description: `A descriptive name for the table.`, - }, - - // max_staleness: [Optional] The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type. - "max_staleness": { - Type: schema.TypeString, - Optional: true, - Description: `The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of [SQL IntervalValue type](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#interval_type).`, - }, - - // Labels: [Experimental] The labels associated with this table. You can - // use these to organize and group your tables. Label keys and values - // can be no longer than 63 characters, can only contain lowercase - // letters, numeric characters, underscores and dashes. International - // characters are allowed. Label values are optional. Label keys must - // start with a letter and each label in the list must have a different - // key. - "labels": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `A mapping of labels to assign to the resource. - - **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
- Please refer to the field 'effective_labels' for all of the labels present on the resource.`, - }, - "terraform_labels": { - Type: schema.TypeMap, - Computed: true, - Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "effective_labels": { - Type: schema.TypeMap, - Computed: true, - Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - // Schema: [Optional] Describes the schema of this table. - "schema": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validateBigQueryTableSchema, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - DiffSuppressFunc: bigQueryTableSchemaDiffSuppress, - Description: `A JSON schema for the table.`, - }, - // View: [Optional] If specified, configures this table as a view. - "view": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures this table as a view.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Query: [Required] A query that BigQuery executes when the view is - // referenced. - "query": { - Type: schema.TypeString, - Required: true, - Description: `A query that BigQuery executes when the view is referenced.`, - }, - - // UseLegacySQL: [Optional] Specifies whether to use BigQuery's - // legacy SQL for this view. The default value is true. If set to - // false, the view will use BigQuery's standard SQL: - "use_legacy_sql": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. 
If set to false, the view will use BigQuery's standard SQL`, - }, - }, - }, - }, - - // Materialized View: [Optional] If specified, configures this table as a materialized view. - "materialized_view": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures this table as a materialized view.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // EnableRefresh: [Optional] Enable automatic refresh of - // the materialized view when the base table is updated. The default - // value is "true". - "enable_refresh": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Specifies if BigQuery should automatically refresh materialized view when the base table is updated. The default is true.`, - }, - - // RefreshIntervalMs: [Optional] The maximum frequency - // at which this materialized view will be refreshed. The default value - // is 1800000 (30 minutes). - "refresh_interval_ms": { - Type: schema.TypeInt, - Default: 1800000, - Optional: true, - Description: `Specifies maximum frequency at which this materialized view will be refreshed. The default is 1800000.`, - }, - - "allow_non_incremental_definition": { - Type: schema.TypeBool, - Default: false, - Optional: true, - ForceNew: true, - Description: `Allow non incremental materialized view definition. The default value is false.`, - }, - - // Query: [Required] A query whose result is persisted - "query": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `A query whose result is persisted.`, - }, - }, - }, - }, - - // TimePartitioning: [Experimental] If specified, configures time-based - // partitioning for this table. 
- "time_partitioning": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures time-based partitioning for this table.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // ExpirationMs: [Optional] Number of milliseconds for which to keep the storage for a - // partition. If unspecified when the table is created in a dataset that has - // `defaultPartitionExpirationMs`, it will inherit the value of - // `defaultPartitionExpirationMs` from the dataset. - // To specify a unlimited expiration, set the value to 0. - "expiration_ms": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - Description: `Number of milliseconds for which to keep the storage for a partition.`, - }, - - // Type: [Required] The supported types are DAY, HOUR, MONTH, and YEAR, which will generate - // one partition per day, hour, month, and year, respectively. - "type": { - Type: schema.TypeString, - Required: true, - Description: `The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.`, - ValidateFunc: validation.StringInSlice([]string{"DAY", "HOUR", "MONTH", "YEAR"}, false), - }, - - // Field: [Optional] The field used to determine how to create a time-based - // partition. If time-based partitioning is enabled without this value, the - // table is partitioned based on the load time. - "field": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.`, - }, - - // RequirePartitionFilter: [Optional] If set to true, queries over this table - // require a partition filter that can be used for partition elimination to be - // specified. 
- "require_partition_filter": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - Deprecated: `This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.`, - ConflictsWith: []string{"require_partition_filter"}, - }, - }, - }, - }, - - // RangePartitioning: [Optional] If specified, configures range-based - // partitioning for this table. - "range_partitioning": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures range-based partitioning for this table.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Field: [Required] The field used to determine how to create a range-based - // partition. - "field": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The field used to determine how to create a range-based partition.`, - }, - - // Range: [Required] Information required to partition based on ranges. - "range": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Description: `Information required to partition based on ranges. Structure is documented below.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Start: [Required] Start of the range partitioning, inclusive. - "start": { - Type: schema.TypeInt, - Required: true, - Description: `Start of the range partitioning, inclusive.`, - }, - - // End: [Required] End of the range partitioning, exclusive. - "end": { - Type: schema.TypeInt, - Required: true, - Description: `End of the range partitioning, exclusive.`, - }, - - // Interval: [Required] The width of each range within the partition. 
- "interval": { - Type: schema.TypeInt, - Required: true, - Description: `The width of each range within the partition.`, - }, - }, - }, - }, - }, - }, - }, - - // RequirePartitionFilter: [Optional] If set to true, queries over this table - // require a partition filter that can be used for partition elimination to be - // specified. - "require_partition_filter": { - Type: schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - ConflictsWith: []string{"time_partitioning.0.require_partition_filter"}, - }, - - // Clustering: [Optional] Specifies column names to use for data clustering. Up to four - // top-level columns are allowed, and should be specified in descending priority order. - "clustering": { - Type: schema.TypeList, - Optional: true, - MaxItems: 4, - Description: `Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "encryption_configuration": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - Description: `The self link or full name of a key which should be used to encrypt this table. 
Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the google_bigquery_default_service_account datasource and the google_kms_crypto_key_iam_binding resource.`, - }, - "kms_key_version": { - Type: schema.TypeString, - Computed: true, - Description: `The self link or full name of the kms key version used to encrypt this table.`, - }, - }, - }, - }, - - // CreationTime: [Output-only] The time when this table was created, in - // milliseconds since the epoch. - "creation_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The time when this table was created, in milliseconds since the epoch.`, - }, - - // Etag: [Output-only] A hash of this resource. - "etag": { - Type: schema.TypeString, - Computed: true, - Description: `A hash of the resource.`, - }, - - // LastModifiedTime: [Output-only] The time when this table was last - // modified, in milliseconds since the epoch. - "last_modified_time": { - Type: schema.TypeInt, - Computed: true, - Description: `The time when this table was last modified, in milliseconds since the epoch.`, - }, - - // Location: [Output-only] The geographic location where the table - // resides. This value is inherited from the dataset. - "location": { - Type: schema.TypeString, - Computed: true, - Description: `The geographic location where the table resides. This value is inherited from the dataset.`, - }, - - // NumBytes: [Output-only] The size of this table in bytes, excluding - // any data in the streaming buffer. - "num_bytes": { - Type: schema.TypeInt, - Computed: true, - Description: `The geographic location where the table resides. This value is inherited from the dataset.`, - }, - - // NumLongTermBytes: [Output-only] The number of bytes in the table that - // are considered "long-term storage". 
- "num_long_term_bytes": { - Type: schema.TypeInt, - Computed: true, - Description: `The number of bytes in the table that are considered "long-term storage".`, - }, - - // NumRows: [Output-only] The number of rows of data in this table, - // excluding any data in the streaming buffer. - "num_rows": { - Type: schema.TypeInt, - Computed: true, - Description: `The number of rows of data in this table, excluding any data in the streaming buffer.`, - }, - - // SelfLink: [Output-only] A URL that can be used to access this - // resource again. - "self_link": { - Type: schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - // Type: [Output-only] Describes the table type. The following values - // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table - // defined by a SQL query. EXTERNAL: A table that references data stored - // in an external storage system, such as Google Cloud Storage. The - // default value is TABLE. - "type": { - Type: schema.TypeString, - Computed: true, - Description: `Describes the table type.`, - }, - - "deletion_protection": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: `Whether Terraform will be prevented from destroying the instance. When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the table will fail. When the field is set to false, deleting the table is allowed.`, - }, - - {{ if ne $.TargetVersionName `ga` -}} - "allow_resource_tags_on_deletion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether or not to allow table deletion when there are still resource tags attached.`, - }, - - {{ end }} - // TableConstraints: [Optional] Defines the primary key and foreign keys. 
- "table_constraints": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Defines the primary key and foreign keys.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // PrimaryKey: [Optional] Represents the primary key constraint - // on a table's columns. Present only if the table has a primary key. - // The primary key is not enforced. - "primary_key": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Represents a primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - //Columns: [Required] The columns that are composed of the primary key constraint. - "columns": { - Type: schema.TypeList, - Required: true, - Description: `The columns that are composed of the primary key constraint.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - // ForeignKeys: [Optional] Present only if the table has a foreign key. - // The foreign key is not enforced. - "foreign_keys": { - Type: schema.TypeList, - Optional: true, - Description: `Present only if the table has a foreign key. The foreign key is not enforced.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Name: [Optional] Set only if the foreign key constraint is named. - "name": { - Type: schema.TypeString, - Optional: true, - Description: `Set only if the foreign key constraint is named.`, - }, - - // ReferencedTable: [Required] The table that holds the primary key - // and is referenced by this foreign key. - "referenced_table": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Description: `The table that holds the primary key and is referenced by this foreign key.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // ProjectId: [Required] The ID of the project containing this table. 
- "project_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the project containing this table.`, - }, - - // DatasetId: [Required] The ID of the dataset containing this table. - "dataset_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the dataset containing this table.`, - }, - - // TableId: [Required] The ID of the table. The ID must contain only - // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum - // length is 1,024 characters. Certain operations allow suffixing of - // the table ID with a partition decorator, such as - // sample_table$20190123. - "table_id": { - Type: schema.TypeString, - Required: true, - Description: `The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.`, - }, - }, - }, - }, - - // ColumnReferences: [Required] The pair of the foreign key column and primary key column. - "column_references": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Description: `The pair of the foreign key column and primary key column.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // ReferencingColumn: [Required] The column that composes the foreign key. 
- "referencing_column": { - Type: schema.TypeString, - Required: true, - Description: `The column that composes the foreign key.`, - }, - - // ReferencedColumn: [Required] The column in the primary key that are - // referenced by the referencingColumn - "referenced_column": { - Type: schema.TypeString, - Required: true, - Description: `The column in the primary key that are referenced by the referencingColumn.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - // TableReplicationInfo: [Optional] Replication info of a table created using `AS REPLICA` DDL like: `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`. - "table_replication_info": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_project_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the source project.`, - }, - "source_dataset_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the source dataset.`, - }, - "source_table_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the source materialized view.`, - }, - "replication_interval_ms": { - Type: schema.TypeInt, - Default: 300000, - Optional: true, - ForceNew: true, - Description: `The interval at which the source materialized view is polled for updates. The default is 300000.`, - }, - }, - }, - }, - {{- if ne $.TargetVersionName "ga" }} - "resource_tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The tags attached to this table. Tag keys are globally unique. 
Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".`, - }, - {{- end }} - }, - UseJSONNumber: true, - } -} - -func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { - config := meta.(*transport_tpg.Config) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return nil, err - } - - table := &bigquery.Table{ - TableReference: &bigquery.TableReference{ - DatasetId: d.Get("dataset_id").(string), - TableId: d.Get("table_id").(string), - ProjectId: project, - }, - } - - if v, ok := d.GetOk("view"); ok { - table.View = expandView(v) - } - - if v, ok := d.GetOk("materialized_view"); ok { - table.MaterializedView = expandMaterializedView(v) - } - - if v, ok := d.GetOk("description"); ok { - table.Description = v.(string) - } - - if v, ok := d.GetOk("expiration_time"); ok { - table.ExpirationTime = int64(v.(int)) - } - - if v, ok := d.GetOk("external_data_configuration"); ok { - externalDataConfiguration, err := expandExternalDataConfiguration(v) - if err != nil { - return nil, err - } - - table.ExternalDataConfiguration = externalDataConfiguration - } - - if v, ok := d.GetOk("friendly_name"); ok { - table.FriendlyName = v.(string) - } - - if v, ok := d.GetOk("max_staleness"); ok { - table.MaxStaleness = v.(string) - } - - if v, ok := d.GetOk("encryption_configuration.0.kms_key_name"); ok { - table.EncryptionConfiguration = &bigquery.EncryptionConfiguration{ - KmsKeyName: v.(string), - } - } - - if v, ok := d.GetOk("effective_labels"); ok { - labels := map[string]string{} - - for k, v := range v.(map[string]interface{}) { - labels[k] = v.(string) - } - - table.Labels = labels - } - - if v, ok := d.GetOk("schema"); ok { - _, viewPresent := d.GetOk("view") - _, materializedViewPresent := d.GetOk("materialized_view") - 
managePolicyTags := !viewPresent && !materializedViewPresent - schema, err := expandSchema(v, managePolicyTags) - if err != nil { - return nil, err - } - table.Schema = schema - } - - if v, ok := d.GetOk("time_partitioning"); ok { - table.TimePartitioning = expandTimePartitioning(v) - } - - if v, ok := d.GetOk("range_partitioning"); ok { - rangePartitioning, err := expandRangePartitioning(v) - if err != nil { - return nil, err - } - - table.RangePartitioning = rangePartitioning - } - - if v, ok := d.GetOk("require_partition_filter"); ok { - table.RequirePartitionFilter = v.(bool) - } - - if v, ok := d.GetOk("clustering"); ok { - table.Clustering = &bigquery.Clustering{ - Fields: tpgresource.ConvertStringArr(v.([]interface{})), - ForceSendFields: []string{"Fields"}, - } - } - - if v, ok := d.GetOk("table_constraints"); ok { - tableConstraints, err := expandTableConstraints(v) - if err != nil { - return nil, err - } - - table.TableConstraints = tableConstraints - } - - {{ if ne $.TargetVersionName `ga` -}} - table.ResourceTags = tpgresource.ExpandStringMap(d, "resource_tags") - - {{ end }} - return table, nil -} - -func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - - if v, ok := d.GetOk("table_replication_info"); ok { - if table.Schema != nil || table.View != nil || table.MaterializedView != nil { - return errors.New("Schema, view, or materialized view cannot be specified when table replication info is present") - } - - replicationDDL := fmt.Sprintf("CREATE MATERIALIZED VIEW %s.%s.%s", d.Get("project").(string), d.Get("dataset_id").(string), d.Get("table_id").(string)) - - 
tableReplicationInfo := expandTableReplicationInfo(v) - replicationIntervalMs := tableReplicationInfo["replication_interval_ms"].(int64) - if replicationIntervalMs > 0 { - replicationIntervalSeconds := replicationIntervalMs / 1000 - replicationDDL = fmt.Sprintf("%s OPTIONS(replication_interval_seconds=%d)", replicationDDL, replicationIntervalSeconds) - } - - replicationDDL = fmt.Sprintf("%s AS REPLICA OF %s.%s.%s", replicationDDL, tableReplicationInfo["source_project_id"], tableReplicationInfo["source_dataset_id"], tableReplicationInfo["source_table_id"]) - useLegacySQL := false - - req := &bigquery.QueryRequest{ - Query: replicationDDL, - UseLegacySql: &useLegacySQL, - } - - log.Printf("[INFO] Creating a replica materialized view with DDL: '%s'", replicationDDL) - - _, err := config.NewBigQueryClient(userAgent).Jobs.Query(project, req).Do() - - id := fmt.Sprintf("projects/%s/datasets/%s/tables/%s", project, datasetID, d.Get("table_id").(string)) - if err != nil { - if deleteErr := resourceBigQueryTableDelete(d, meta); deleteErr != nil { - log.Printf("[INFO] Unable to clean up table %s: %s", id, deleteErr) - } - return err - } - - log.Printf("[INFO] BigQuery table %s has been created", id) - d.SetId(id) - - return resourceBigQueryTableRead(d, meta) - } - - if table.View != nil && table.Schema != nil { - - log.Printf("[INFO] Removing schema from table definition because BigQuery does not support setting schema on view creation") - schemaBack := table.Schema - table.Schema = nil - - log.Printf("[INFO] Creating BigQuery table: %s without schema", table.TableReference.TableId) - - res, err := config.NewBigQueryClient(userAgent).Tables.Insert(project, datasetID, table).Do() - if err != nil { - return err - } - - log.Printf("[INFO] BigQuery table %s has been created", res.Id) - d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) - - table.Schema = schemaBack - log.Printf("[INFO] 
Updating BigQuery table: %s with schema", table.TableReference.TableId) - if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, res.TableReference.TableId, table).Do(); err != nil { - return err - } - - log.Printf("[INFO] BigQuery table %s has been updated with schema", res.Id) - } else { - log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) - - res, err := config.NewBigQueryClient(userAgent).Tables.Insert(project, datasetID, table).Do() - if err != nil { - return err - } - - log.Printf("[INFO] BigQuery table %s has been created", res.Id) - d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) - } - - return resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - res, err := config.NewBigQueryClient(userAgent).Tables.Get(project, datasetID, tableID).Do() - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) - } - - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("description", res.Description); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("expiration_time", res.ExpirationTime); err != nil { - return fmt.Errorf("Error setting expiration_time: %s", err) - } - if err := d.Set("friendly_name", res.FriendlyName); err != nil { - return fmt.Errorf("Error setting friendly_name: %s", err) - } - if 
err := d.Set("max_staleness", res.MaxStaleness); err != nil { - return fmt.Errorf("Error setting max_staleness: %s", err) - } - if err := tpgresource.SetLabels(res.Labels, d, "labels"); err != nil { - return fmt.Errorf("Error setting labels: %s", err) - } - if err := tpgresource.SetLabels(res.Labels, d, "terraform_labels"); err != nil { - return fmt.Errorf("Error setting terraform_labels: %s", err) - } - if err := d.Set("effective_labels", res.Labels); err != nil { - return fmt.Errorf("Error setting effective_labels: %s", err) - } - if err := d.Set("creation_time", res.CreationTime); err != nil { - return fmt.Errorf("Error setting creation_time: %s", err) - } - if err := d.Set("etag", res.Etag); err != nil { - return fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("last_modified_time", res.LastModifiedTime); err != nil { - return fmt.Errorf("Error setting last_modified_time: %s", err) - } - if err := d.Set("location", res.Location); err != nil { - return fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("num_bytes", res.NumBytes); err != nil { - return fmt.Errorf("Error setting num_bytes: %s", err) - } - if err := d.Set("table_id", res.TableReference.TableId); err != nil { - return fmt.Errorf("Error setting table_id: %s", err) - } - if err := d.Set("dataset_id", res.TableReference.DatasetId); err != nil { - return fmt.Errorf("Error setting dataset_id: %s", err) - } - if err := d.Set("num_long_term_bytes", res.NumLongTermBytes); err != nil { - return fmt.Errorf("Error setting num_long_term_bytes: %s", err) - } - if err := d.Set("num_rows", res.NumRows); err != nil { - return fmt.Errorf("Error setting num_rows: %s", err) - } - if err := d.Set("self_link", res.SelfLink); err != nil { - return fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("type", res.Type); err != nil { - return fmt.Errorf("Error setting type: %s", err) - } - - // determine whether the deprecated require_partition_filter field is used - 
use_old_rpf := false - if _, ok := d.GetOk("time_partitioning.0.require_partition_filter"); ok { - use_old_rpf = true - } else if err := d.Set("require_partition_filter", res.RequirePartitionFilter); err != nil { - return fmt.Errorf("Error setting require_partition_filter: %s", err) - } - - if res.ExternalDataConfiguration != nil { - externalDataConfiguration, err := flattenExternalDataConfiguration(res.ExternalDataConfiguration) - if err != nil { - return err - } - - if v, ok := d.GetOk("external_data_configuration"); ok { - // The API response doesn't return the `external_data_configuration.schema` - // used when creating the table and it cannot be queried. - // After creation, a computed schema is stored in the toplevel `schema`, - // which combines `external_data_configuration.schema` - // with any hive partioning fields found in the `source_uri_prefix`. - // So just assume the configured schema has been applied after successful - // creation, by copying the configured value back into the resource schema. - // This avoids that reading back this field will be identified as a change. - // The `ForceNew=true` on `external_data_configuration.schema` will ensure - // the users' expectation that changing the configured input schema will - // recreate the resource. 
- edc := v.([]interface{})[0].(map[string]interface{}) - if edc["schema"] != nil { - externalDataConfiguration[0]["schema"] = edc["schema"] - } - } - - if err := d.Set("external_data_configuration", externalDataConfiguration); err != nil { - return fmt.Errorf("Error setting external_data_configuration: %s", err) - } - } - - if res.TimePartitioning != nil { - if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning, use_old_rpf)); err != nil { - return err - } - } - - if res.RangePartitioning != nil { - if err := d.Set("range_partitioning", flattenRangePartitioning(res.RangePartitioning)); err != nil { - return err - } - } - - if res.Clustering != nil { - if err := d.Set("clustering", res.Clustering.Fields); err != nil { - return fmt.Errorf("Error setting clustering: %s", err) - } - } - if res.EncryptionConfiguration != nil { - if err := d.Set("encryption_configuration", flattenEncryptionConfiguration(res.EncryptionConfiguration)); err != nil { - return err - } - } - - if res.Schema != nil { - schema, err := flattenSchema(res.Schema) - if err != nil { - return err - } - if err := d.Set("schema", schema); err != nil { - return fmt.Errorf("Error setting schema: %s", err) - } - } - - if res.View != nil { - view := flattenView(res.View) - if err := d.Set("view", view); err != nil { - return fmt.Errorf("Error setting view: %s", err) - } - } - - if res.MaterializedView != nil { - materialized_view := flattenMaterializedView(res.MaterializedView) - - if err := d.Set("materialized_view", materialized_view); err != nil { - return fmt.Errorf("Error setting materialized view: %s", err) - } - } - - if res.TableConstraints != nil { - table_constraints := flattenTableConstraints(res.TableConstraints) - - if err := d.Set("table_constraints", table_constraints); err != nil { - return fmt.Errorf("Error setting table constraints: %s", err) - } - } - - {{ if ne $.TargetVersionName `ga` -}} - if err := d.Set("resource_tags", res.ResourceTags); err != nil { - 
return fmt.Errorf("Error setting resource tags: %s", err) - } - - {{ end }} - // TODO: Update when the Get API fields for TableReplicationInfo are available in the client library. - url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}BigQueryBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}") - if err != nil { - return err - } - - log.Printf("[INFO] Reading BigQuery table through API: %s", url) - - getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - RawURL: url, - UserAgent: userAgent, - }) - if err != nil { - return err - } - - if v, ok := getRes["tableReplicationInfo"]; ok { - tableReplicationInfo := flattenTableReplicationInfo(v.(map[string]interface{})) - - if err := d.Set("table_replication_info", tableReplicationInfo); err != nil { - return fmt.Errorf("Error setting table replication info: %s", err) - } - } - - return nil -} - -type TableReference struct { - project string - datasetID string - tableID string -} - -func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - tableReference := &TableReference{ - project: project, - datasetID: datasetID, - tableID: tableID, - } - - if err = resourceBigQueryTableColumnDrop(config, userAgent, table, tableReference); err != nil { - return err - } - - if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, tableID, table).Do(); err != nil { - return err - } - - return 
resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableColumnDrop(config *transport_tpg.Config, userAgent string, table *bigquery.Table, tableReference *TableReference) error { - oldTable, err := config.NewBigQueryClient(userAgent).Tables.Get(tableReference.project, tableReference.datasetID, tableReference.tableID).Do() - if err != nil { - return err - } - - if table.Schema == nil { - return nil - } - - newTableFields := map[string]bool{} - for _, field := range table.Schema.Fields { - newTableFields[field.Name] = true - } - - droppedColumns := []string{} - for _, field := range oldTable.Schema.Fields { - if !newTableFields[field.Name] { - droppedColumns = append(droppedColumns, field.Name) - } - } - - if len(droppedColumns) > 0 { - droppedColumnsString := strings.Join(droppedColumns, ", DROP COLUMN ") - - dropColumnsDDL := fmt.Sprintf("ALTER TABLE `%s.%s.%s` DROP COLUMN %s", tableReference.project, tableReference.datasetID, tableReference.tableID, droppedColumnsString) - log.Printf("[INFO] Dropping columns in-place: %s", dropColumnsDDL) - - useLegacySQL := false - req := &bigquery.QueryRequest{ - Query: dropColumnsDDL, - UseLegacySql: &useLegacySQL, - } - - _, err = config.NewBigQueryClient(userAgent).Jobs.Query(tableReference.project, req).Do() - if err != nil { - return err - } - } - - return nil -} - -func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error { - if d.Get("deletion_protection").(bool) { - return fmt.Errorf("cannot destroy table %v without setting deletion_protection=false and running `terraform apply`", d.Id()) - } - {{- if ne $.TargetVersionName "ga" }} - if v, ok := d.GetOk("resource_tags"); ok { - if !d.Get("allow_resource_tags_on_deletion").(bool) { - var resourceTags []string - - for k, v := range v.(map[string]interface{}) { - resourceTags = append(resourceTags, fmt.Sprintf("%s:%s", k, v.(string))) - } - - return fmt.Errorf("cannot destroy table %v without unsetting the following resource tags or setting 
allow_resource_tags_on_deletion=true: %v", d.Id(), resourceTags) - } - } - - {{ end }} - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - if err := config.NewBigQueryClient(userAgent).Tables.Delete(project, datasetID, tableID).Do(); err != nil { - return err - } - - d.SetId("") - - return nil -} - -func expandExternalDataConfiguration(cfg interface{}) (*bigquery.ExternalDataConfiguration, error) { - raw := cfg.([]interface{})[0].(map[string]interface{}) - - edc := &bigquery.ExternalDataConfiguration{ - Autodetect: raw["autodetect"].(bool), - } - - sourceUris := []string{} - for _, rawSourceUri := range raw["source_uris"].([]interface{}) { - sourceUris = append(sourceUris, rawSourceUri.(string)) - } - if len(sourceUris) > 0 { - edc.SourceUris = sourceUris - } - - if v, ok := raw["file_set_spec_type"]; ok { - edc.FileSetSpecType = v.(string) - } - - if v, ok := raw["compression"]; ok { - edc.Compression = v.(string) - } - - if v, ok := raw["json_extension"]; ok { - edc.JsonExtension = v.(string) - } - - if v, ok := raw["csv_options"]; ok { - edc.CsvOptions = expandCsvOptions(v) - } - if v, ok := raw["json_options"]; ok { - edc.JsonOptions = expandJsonOptions(v) - } - if v, ok := raw["bigtable_options"]; ok { - edc.BigtableOptions = expandBigtableOptions(v) - } - if v, ok := raw["google_sheets_options"]; ok { - edc.GoogleSheetsOptions = expandGoogleSheetsOptions(v) - } - if v, ok := raw["hive_partitioning_options"]; ok { - edc.HivePartitioningOptions = expandHivePartitioningOptions(v) - } - if v, ok := raw["avro_options"]; ok { - edc.AvroOptions = expandAvroOptions(v) - } - if v, ok := raw["parquet_options"]; ok { - 
edc.ParquetOptions = expandParquetOptions(v) - } - - if v, ok := raw["ignore_unknown_values"]; ok { - edc.IgnoreUnknownValues = v.(bool) - } - if v, ok := raw["max_bad_records"]; ok { - edc.MaxBadRecords = int64(v.(int)) - } - if v, ok := raw["schema"]; ok { - managePolicyTags := true - schema, err := expandSchema(v, managePolicyTags) - if err != nil { - return nil, err - } - edc.Schema = schema - } - if v, ok := raw["source_format"]; ok { - edc.SourceFormat = v.(string) - } - if v, ok := raw["connection_id"]; ok { - edc.ConnectionId = v.(string) - } - if v, ok := raw["reference_file_schema_uri"]; ok { - edc.ReferenceFileSchemaUri = v.(string) - } - if v, ok := raw["metadata_cache_mode"]; ok { - edc.MetadataCacheMode = v.(string) - } - if v, ok := raw["object_metadata"]; ok { - edc.ObjectMetadata = v.(string) - } - - return edc, nil - -} - -func flattenExternalDataConfiguration(edc *bigquery.ExternalDataConfiguration) ([]map[string]interface{}, error) { - result := map[string]interface{}{} - - result["autodetect"] = edc.Autodetect - result["source_uris"] = edc.SourceUris - - if edc.FileSetSpecType != "" { - result["file_set_spec_type"] = edc.FileSetSpecType - } - - if edc.Compression != "" { - result["compression"] = edc.Compression - } - - if edc.JsonExtension != "" { - result["json_extension"] = edc.JsonExtension - } - - if edc.CsvOptions != nil { - result["csv_options"] = flattenCsvOptions(edc.CsvOptions) - } - - if edc.GoogleSheetsOptions != nil { - result["google_sheets_options"] = flattenGoogleSheetsOptions(edc.GoogleSheetsOptions) - } - - if edc.HivePartitioningOptions != nil { - result["hive_partitioning_options"] = flattenHivePartitioningOptions(edc.HivePartitioningOptions) - } - - if edc.AvroOptions != nil { - result["avro_options"] = flattenAvroOptions(edc.AvroOptions) - } - - if edc.ParquetOptions != nil { - result["parquet_options"] = flattenParquetOptions(edc.ParquetOptions) - } - - if edc.JsonOptions != nil { - result["json_options"] = 
flattenJsonOptions(edc.JsonOptions) - } - - if edc.BigtableOptions != nil { - result["bigtable_options"] = flattenBigtableOptions(edc.BigtableOptions) - } - - if edc.IgnoreUnknownValues { - result["ignore_unknown_values"] = edc.IgnoreUnknownValues - } - if edc.MaxBadRecords != 0 { - result["max_bad_records"] = edc.MaxBadRecords - } - - if edc.SourceFormat != "" { - result["source_format"] = edc.SourceFormat - } - - if edc.ConnectionId != "" { - result["connection_id"] = edc.ConnectionId - } - - if edc.ReferenceFileSchemaUri != "" { - result["reference_file_schema_uri"] = edc.ReferenceFileSchemaUri - } - if edc.MetadataCacheMode != "" { - result["metadata_cache_mode"] = edc.MetadataCacheMode - } - - if edc.ObjectMetadata != "" { - result["object_metadata"] = edc.ObjectMetadata - } - - return []map[string]interface{}{result}, nil -} - -func expandCsvOptions(configured interface{}) *bigquery.CsvOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.CsvOptions{} - - if v, ok := raw["allow_jagged_rows"]; ok { - opts.AllowJaggedRows = v.(bool) - opts.ForceSendFields = append(opts.ForceSendFields, "allow_jagged_rows") - } - - if v, ok := raw["allow_quoted_newlines"]; ok { - opts.AllowQuotedNewlines = v.(bool) - opts.ForceSendFields = append(opts.ForceSendFields, "allow_quoted_newlines") - } - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - if v, ok := raw["field_delimiter"]; ok { - opts.FieldDelimiter = v.(string) - } - - if v, ok := raw["skip_leading_rows"]; ok { - opts.SkipLeadingRows = int64(v.(int)) - } - - if v, ok := raw["quote"]; ok { - quote := v.(string) - opts.Quote = "e - } - - opts.ForceSendFields = []string{"Quote"} - - return opts -} - -func flattenCsvOptions(opts *bigquery.CsvOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.AllowJaggedRows { - result["allow_jagged_rows"] = opts.AllowJaggedRows - } 
- - if opts.AllowQuotedNewlines { - result["allow_quoted_newlines"] = opts.AllowQuotedNewlines - } - - if opts.Encoding != "" { - result["encoding"] = opts.Encoding - } - - if opts.FieldDelimiter != "" { - result["field_delimiter"] = opts.FieldDelimiter - } - - if opts.SkipLeadingRows != 0 { - result["skip_leading_rows"] = opts.SkipLeadingRows - } - - if opts.Quote != nil { - result["quote"] = *opts.Quote - } - - return []map[string]interface{}{result} -} - -func expandGoogleSheetsOptions(configured interface{}) *bigquery.GoogleSheetsOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.GoogleSheetsOptions{} - - if v, ok := raw["range"]; ok { - opts.Range = v.(string) - } - - if v, ok := raw["skip_leading_rows"]; ok { - opts.SkipLeadingRows = int64(v.(int)) - } - return opts -} - -func flattenGoogleSheetsOptions(opts *bigquery.GoogleSheetsOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Range != "" { - result["range"] = opts.Range - } - - if opts.SkipLeadingRows != 0 { - result["skip_leading_rows"] = opts.SkipLeadingRows - } - - return []map[string]interface{}{result} -} - -func expandHivePartitioningOptions(configured interface{}) *bigquery.HivePartitioningOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.HivePartitioningOptions{} - - if v, ok := raw["mode"]; ok { - opts.Mode = v.(string) - } - - if v, ok := raw["require_partition_filter"]; ok { - opts.RequirePartitionFilter = v.(bool) - } - - if v, ok := raw["source_uri_prefix"]; ok { - opts.SourceUriPrefix = v.(string) - } - - return opts -} - -func flattenHivePartitioningOptions(opts *bigquery.HivePartitioningOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Mode != "" { - result["mode"] = opts.Mode - } - - if 
opts.RequirePartitionFilter { - result["require_partition_filter"] = opts.RequirePartitionFilter - } - - if opts.SourceUriPrefix != "" { - result["source_uri_prefix"] = opts.SourceUriPrefix - } - - return []map[string]interface{}{result} -} - -func expandAvroOptions(configured interface{}) *bigquery.AvroOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.AvroOptions{} - - if v, ok := raw["use_avro_logical_types"]; ok { - opts.UseAvroLogicalTypes = v.(bool) - } - - return opts -} - -func flattenAvroOptions(opts *bigquery.AvroOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.UseAvroLogicalTypes { - result["use_avro_logical_types"] = opts.UseAvroLogicalTypes - } - - return []map[string]interface{}{result} -} - -func expandParquetOptions(configured interface{}) *bigquery.ParquetOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.ParquetOptions{} - - if v, ok := raw["enum_as_string"]; ok { - opts.EnumAsString = v.(bool) - } - - if v, ok := raw["enable_list_inference"]; ok { - opts.EnableListInference = v.(bool) - } - - return opts -} - -func flattenParquetOptions(opts *bigquery.ParquetOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.EnumAsString { - result["enum_as_string"] = opts.EnumAsString - } - - if opts.EnableListInference { - result["enable_list_inference"] = opts.EnableListInference - } - - return []map[string]interface{}{result} -} - -func expandBigtableOptions(configured interface{}) *bigquery.BigtableOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.BigtableOptions{} - - crs := []*bigquery.BigtableColumnFamily{} - if v, ok := raw["column_family"]; ok { - for _, columnFamily := 
range v.([]interface{}) { - crs = append(crs, expandBigtableColumnFamily(columnFamily)) - } - - if len(crs) > 0 { - opts.ColumnFamilies = crs - } - } - - if v, ok := raw["ignore_unspecified_column_families"]; ok { - opts.IgnoreUnspecifiedColumnFamilies = v.(bool) - } - - if v, ok := raw["read_rowkey_as_string"]; ok { - opts.ReadRowkeyAsString = v.(bool) - } - - if v, ok := raw["output_column_families_as_json"]; ok { - opts.OutputColumnFamiliesAsJson = v.(bool) - } - - return opts -} - -func flattenBigtableOptions(opts *bigquery.BigtableOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.ColumnFamilies != nil { - result["column_family"] = flattenBigtableColumnFamily(opts.ColumnFamilies) - } - - if opts.IgnoreUnspecifiedColumnFamilies { - result["ignore_unspecified_column_families"] = opts.IgnoreUnspecifiedColumnFamilies - } - - if opts.ReadRowkeyAsString { - result["read_rowkey_as_string"] = opts.ReadRowkeyAsString - } - - if opts.OutputColumnFamiliesAsJson { - result["output_column_families_as_json"] = opts.OutputColumnFamiliesAsJson - } - - return []map[string]interface{}{result} -} - -func expandBigtableColumnFamily(configured interface{}) *bigquery.BigtableColumnFamily { - raw := configured.(map[string]interface{}) - - opts := &bigquery.BigtableColumnFamily{} - - crs := []*bigquery.BigtableColumn{} - if v, ok := raw["column"]; ok { - for _, column := range v.([]interface{}) { - crs = append(crs, expandBigtableColumn(column)) - } - - if len(crs) > 0 { - opts.Columns = crs - } - } - - if v, ok := raw["family_id"]; ok { - opts.FamilyId = v.(string) - } - - if v, ok := raw["type"]; ok { - opts.Type = v.(string) - } - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - if v, ok := raw["only_read_latest"]; ok { - opts.OnlyReadLatest = v.(bool) - } - - return opts -} - -func flattenBigtableColumnFamily(edc []*bigquery.BigtableColumnFamily) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, 
fr := range edc { - result := map[string]interface{}{} - if fr.Columns != nil { - result["column"] = flattenBigtableColumn(fr.Columns) - } - result["family_id"] = fr.FamilyId - result["type"] = fr.Type - result["encoding"] = fr.Encoding - result["only_read_latest"] = fr.OnlyReadLatest - results = append(results, result) - } - - return results -} - -func expandBigtableColumn(configured interface{}) *bigquery.BigtableColumn { - raw := configured.(map[string]interface{}) - - opts := &bigquery.BigtableColumn{} - - if v, ok := raw["qualifier_encoded"]; ok { - opts.QualifierEncoded = v.(string) - } - - if v, ok := raw["qualifier_string"]; ok { - opts.QualifierString = v.(string) - } - - if v, ok := raw["field_name"]; ok { - opts.FieldName = v.(string) - } - - if v, ok := raw["type"]; ok { - opts.Type = v.(string) - } - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - if v, ok := raw["only_read_latest"]; ok { - opts.OnlyReadLatest = v.(bool) - } - - return opts -} - -func flattenBigtableColumn(edc []*bigquery.BigtableColumn) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, fr := range edc { - result := map[string]interface{}{} - result["qualifier_encoded"] = fr.QualifierEncoded - result["qualifier_string"] = fr.QualifierString - result["field_name"] = fr.FieldName - result["type"] = fr.Type - result["encoding"] = fr.Encoding - result["only_read_latest"] = fr.OnlyReadLatest - results = append(results, result) - } - - return results -} - -func expandJsonOptions(configured interface{}) *bigquery.JsonOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &bigquery.JsonOptions{} - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - return opts -} - -func flattenJsonOptions(opts *bigquery.JsonOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Encoding != "" { - result["encoding"] = 
opts.Encoding - } - - return []map[string]interface{}{result} -} - -func expandSchema(raw interface{}, managePolicyTags bool) (*bigquery.TableSchema, error) { - var fields []*bigquery.TableFieldSchema - - if len(raw.(string)) == 0 { - return nil, nil - } - - if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil { - return nil, err - } - - if managePolicyTags { - for _, field := range fields { - setEmptyPolicyTagsInSchema(field) - } - } - - return &bigquery.TableSchema{Fields: fields}, nil -} - -func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) { - schema, err := json.Marshal(tableSchema.Fields) - if err != nil { - return "", err - } - - return string(schema), nil -} - -// Explicitly set empty PolicyTags unless the PolicyTags field is specified in the schema. -func setEmptyPolicyTagsInSchema(field *bigquery.TableFieldSchema) { - // Field has children fields. - if len(field.Fields) > 0 { - for _, subField := range field.Fields { - setEmptyPolicyTagsInSchema(subField) - } - return - } - // Field is a leaf. 
- if field.PolicyTags == nil { - field.PolicyTags = &bigquery.TableFieldSchemaPolicyTags{Names: []string{}} - } -} - -func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { - raw := configured.([]interface{})[0].(map[string]interface{}) - tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} - - if v, ok := raw["field"]; ok { - tp.Field = v.(string) - } - - if v, ok := raw["expiration_ms"]; ok { - tp.ExpirationMs = int64(v.(int)) - } - - if v, ok := raw["require_partition_filter"]; ok { - tp.RequirePartitionFilter = v.(bool) - } - - return tp -} - -func expandRangePartitioning(configured interface{}) (*bigquery.RangePartitioning, error) { - if configured == nil { - return nil, nil - } - - rpList := configured.([]interface{}) - if len(rpList) == 0 || rpList[0] == nil { - return nil, errors.New("Error casting range partitioning interface to expected structure") - } - - rangePartJson := rpList[0].(map[string]interface{}) - rp := &bigquery.RangePartitioning{ - Field: rangePartJson["field"].(string), - } - - if v, ok := rangePartJson["range"]; ok && v != nil { - rangeLs := v.([]interface{}) - if len(rangeLs) != 1 || rangeLs[0] == nil { - return nil, errors.New("Non-empty range must be given for range partitioning") - } - - rangeJson := rangeLs[0].(map[string]interface{}) - rp.Range = &bigquery.RangePartitioningRange{ - Start: int64(rangeJson["start"].(int)), - End: int64(rangeJson["end"].(int)), - Interval: int64(rangeJson["interval"].(int)), - ForceSendFields: []string{"Start"}, - } - } - - return rp, nil -} - -func flattenEncryptionConfiguration(ec *bigquery.EncryptionConfiguration) []map[string]interface{} { - re := regexp.MustCompile(`(projects/.*/locations/.*/keyRings/.*/cryptoKeys/.*)/cryptoKeyVersions/.*`) - paths := re.FindStringSubmatch(ec.KmsKeyName) - - if len(ec.KmsKeyName) == 0 { - return nil - } - - if len(paths) > 0 { - return []map[string]interface{}{ - { - "kms_key_name": paths[1], - "kms_key_version": ec.KmsKeyName, - 
}, - } - } - - // The key name was returned, no need to set the version - return []map[string]interface{}{{"{{"}}"kms_key_name": ec.KmsKeyName, "kms_key_version": ""{{"}}"}} -} - -func flattenTimePartitioning(tp *bigquery.TimePartitioning, use_old_rpf bool) []map[string]interface{} { - result := map[string]interface{}{"type": tp.Type} - - if tp.Field != "" { - result["field"] = tp.Field - } - - if tp.ExpirationMs != 0 { - result["expiration_ms"] = tp.ExpirationMs - } - - if tp.RequirePartitionFilter && use_old_rpf { - result["require_partition_filter"] = tp.RequirePartitionFilter - } - - return []map[string]interface{}{result} -} - -func flattenRangePartitioning(rp *bigquery.RangePartitioning) []map[string]interface{} { - result := map[string]interface{}{ - "field": rp.Field, - "range": []map[string]interface{}{ - { - "start": rp.Range.Start, - "end": rp.Range.End, - "interval": rp.Range.Interval, - }, - }, - } - - return []map[string]interface{}{result} -} - -func expandView(configured interface{}) *bigquery.ViewDefinition { - raw := configured.([]interface{})[0].(map[string]interface{}) - vd := &bigquery.ViewDefinition{Query: raw["query"].(string)} - - if v, ok := raw["use_legacy_sql"]; ok { - vd.UseLegacySql = v.(bool) - vd.ForceSendFields = append(vd.ForceSendFields, "UseLegacySql") - } - - return vd -} - -func flattenView(vd *bigquery.ViewDefinition) []map[string]interface{} { - result := map[string]interface{}{"query": vd.Query} - result["use_legacy_sql"] = vd.UseLegacySql - - return []map[string]interface{}{result} -} - -func expandMaterializedView(configured interface{}) *bigquery.MaterializedViewDefinition { - raw := configured.([]interface{})[0].(map[string]interface{}) - mvd := &bigquery.MaterializedViewDefinition{Query: raw["query"].(string)} - - if v, ok := raw["enable_refresh"]; ok { - mvd.EnableRefresh = v.(bool) - mvd.ForceSendFields = append(mvd.ForceSendFields, "EnableRefresh") - } - - if v, ok := raw["refresh_interval_ms"]; ok { - 
mvd.RefreshIntervalMs = int64(v.(int)) - mvd.ForceSendFields = append(mvd.ForceSendFields, "RefreshIntervalMs") - } - - if v, ok := raw["allow_non_incremental_definition"]; ok { - mvd.AllowNonIncrementalDefinition = v.(bool) - mvd.ForceSendFields = append(mvd.ForceSendFields, "AllowNonIncrementalDefinition") - } - - return mvd -} - -func flattenMaterializedView(mvd *bigquery.MaterializedViewDefinition) []map[string]interface{} { - result := map[string]interface{}{"query": mvd.Query} - result["enable_refresh"] = mvd.EnableRefresh - result["refresh_interval_ms"] = mvd.RefreshIntervalMs - result["allow_non_incremental_definition"] = mvd.AllowNonIncrementalDefinition - - return []map[string]interface{}{result} -} - -func expandPrimaryKey(configured interface{}) *bigquery.TableConstraintsPrimaryKey { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - pk := &bigquery.TableConstraintsPrimaryKey{} - - columns := []string{} - for _, rawColumn := range raw["columns"].([]interface{}) { - if rawColumn == nil { - // Terraform reads "" as nil, which ends up crashing when we cast below - // sending "" to the API triggers a 400, which is okay. 
- rawColumn = "" - } - columns = append(columns, rawColumn.(string)) - } - if len(columns) > 0 { - pk.Columns = columns - } - - return pk -} - -func flattenPrimaryKey(edc *bigquery.TableConstraintsPrimaryKey) []map[string]interface{} { - result := map[string]interface{}{} - - if edc.Columns != nil { - result["columns"] = edc.Columns - } - - return []map[string]interface{}{result} -} - -func expandReferencedTable(configured interface{}) *bigquery.TableConstraintsForeignKeysReferencedTable { - raw := configured.([]interface{})[0].(map[string]interface{}) - rt := &bigquery.TableConstraintsForeignKeysReferencedTable{} - - if v, ok := raw["project_id"]; ok { - rt.ProjectId = v.(string) - } - if v, ok := raw["dataset_id"]; ok { - rt.DatasetId = v.(string) - } - if v, ok := raw["table_id"]; ok { - rt.TableId = v.(string) - } - - return rt -} - -func flattenReferencedTable(edc *bigquery.TableConstraintsForeignKeysReferencedTable) []map[string]interface{} { - result := map[string]interface{}{} - - result["project_id"] = edc.ProjectId - result["dataset_id"] = edc.DatasetId - result["table_id"] = edc.TableId - - return []map[string]interface{}{result} -} - -func expandColumnReference(configured interface{}) *bigquery.TableConstraintsForeignKeysColumnReferences { - raw := configured.(map[string]interface{}) - - cr := &bigquery.TableConstraintsForeignKeysColumnReferences{} - - if v, ok := raw["referencing_column"]; ok { - cr.ReferencingColumn = v.(string) - } - if v, ok := raw["referenced_column"]; ok { - cr.ReferencedColumn = v.(string) - } - - return cr -} - -func flattenColumnReferences(edc []*bigquery.TableConstraintsForeignKeysColumnReferences) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, cr := range edc { - result := map[string]interface{}{} - result["referenced_column"] = cr.ReferencedColumn - result["referencing_column"] = cr.ReferencingColumn - results = append(results, result) - } - - return results -} - -func 
expandForeignKey(configured interface{}) *bigquery.TableConstraintsForeignKeys { - raw := configured.(map[string]interface{}) - - fk := &bigquery.TableConstraintsForeignKeys{} - if v, ok := raw["name"]; ok { - fk.Name = v.(string) - } - if v, ok := raw["referenced_table"]; ok { - fk.ReferencedTable = expandReferencedTable(v) - } - crs := []*bigquery.TableConstraintsForeignKeysColumnReferences{} - if v, ok := raw["column_references"]; ok { - for _, rawColumnReferences := range v.([]interface{}) { - crs = append(crs, expandColumnReference(rawColumnReferences)) - } - } - - if len(crs) > 0 { - fk.ColumnReferences = crs - } - - return fk -} - -func flattenForeignKeys(edc []*bigquery.TableConstraintsForeignKeys) []map[string]interface{} { - results := []map[string]interface{}{} - - for _, fr := range edc { - result := map[string]interface{}{} - result["name"] = fr.Name - result["column_references"] = flattenColumnReferences(fr.ColumnReferences) - result["referenced_table"] = flattenReferencedTable(fr.ReferencedTable) - results = append(results, result) - } - - return results -} - -func expandTableConstraints(cfg interface{}) (*bigquery.TableConstraints, error) { - raw := cfg.([]interface{})[0].(map[string]interface{}) - - edc := &bigquery.TableConstraints{} - - if v, ok := raw["primary_key"]; ok { - edc.PrimaryKey = expandPrimaryKey(v) - } - - fks := []*bigquery.TableConstraintsForeignKeys{} - - if v, ok := raw["foreign_keys"]; ok { - for _, rawForeignKey := range v.([]interface{}) { - fks = append(fks, expandForeignKey(rawForeignKey)) - } - } - - if len(fks) > 0 { - edc.ForeignKeys = fks - } - - return edc, nil - -} - -func flattenTableConstraints(edc *bigquery.TableConstraints) []map[string]interface{} { - result := map[string]interface{}{} - - if edc.PrimaryKey != nil { - result["primary_key"] = flattenPrimaryKey(edc.PrimaryKey) - } - if edc.ForeignKeys != nil { - result["foreign_keys"] = flattenForeignKeys(edc.ForeignKeys) - } - - return 
[]map[string]interface{}{result} -} - -func expandTableReplicationInfo(cfg interface{}) map[string]interface{} { - raw := cfg.([]interface{})[0].(map[string]interface{}) - - result := map[string]interface{}{} - - if v, ok := raw["source_project_id"]; ok { - result["source_project_id"] = v.(string) - } - - if v, ok := raw["source_dataset_id"]; ok { - result["source_dataset_id"] = v.(string) - } - - if v, ok := raw["source_table_id"]; ok { - result["source_table_id"] = v.(string) - } - - if v, ok := raw["replication_interval_ms"]; ok { - result["replication_interval_ms"] = int64(v.(int)) - } - - return result -} - -func flattenTableReplicationInfo(tableReplicationInfo map[string]interface{}) []map[string]interface{} { - result := map[string]interface{}{} - - if v, ok := tableReplicationInfo["sourceTable"]; ok { - sourceTable := v.(map[string]interface{}) - if v, ok := sourceTable["projectId"]; ok { - result["source_project_id"] = v.(string) - } - if v, ok := sourceTable["datasetId"]; ok { - result["source_dataset_id"] = v.(string) - } - if v, ok := sourceTable["tableId"]; ok { - result["source_table_id"] = v.(string) - } - } - - if v, ok := tableReplicationInfo["replicationIntervalMs"]; ok { - replicationIntervalMs := v.(string) - if i, err := strconv.Atoi(replicationIntervalMs); err == nil { - result["replication_interval_ms"] = int64(i) - } - } - - return []map[string]interface{}{result} -} - -func resourceBigQueryTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*transport_tpg.Config) - if err := tpgresource.ParseImportId([]string{ - "projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - // Explicitly set virtual fields to default values on import - if err := d.Set("deletion_protection", true); err != nil { - return nil, fmt.Errorf("Error setting deletion_protection: %s", err) - } - {{- if ne 
$.TargetVersionName "ga" }} - if err := d.Set("allow_resource_tags_on_deletion", false); err != nil { - return nil, fmt.Errorf("Error setting allow_resource_tags_on_deletion: %s", err) - } - {{- end }} - - // Replace import id for the resource id - id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl deleted file mode 100644 index 20439416efb3..000000000000 --- a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl +++ /dev/null @@ -1,4261 +0,0 @@ -package bigquery_test - -import ( - "fmt" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/envvar" -) - -func TestAccBigQueryTable_Basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "DAY"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, 
tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_DropColumns(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioningDropColumns(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableTimePartitioningDropColumnsUpdate(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Kms(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - kms := acctest.BootstrapKMSKey(t) - cryptoKeyName := kms.CryptoKey.Name - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableKms(cryptoKeyName, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func 
TestAccBigQueryTable_HourlyTimePartitioning(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "HOUR"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MonthlyTimePartitioning(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "MONTH"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_YearlyTimePartitioning(t *testing.T) { - t.Parallel() - 
- datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "YEAR"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableUpdated(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_HivePartitioning(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableHivePartitioning(bucketName, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_HivePartitioningCustomSchema(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - 
acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableHivePartitioningCustomSchema(bucketName, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"external_data_configuration.0.schema", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_AvroPartitioning(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - avroFilePath := "./test-fixtures/avro-generated.avro" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableAvroPartitioning(bucketName, avroFilePath, datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_json(t *testing.T) { - t.Parallel() - bucketName := acctest.TestBucketName(t) - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: 
testAccBigQueryTableJson(datasetID, tableID, bucketName, "UTF-8"), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"external_data_configuration.0.schema", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableJson(datasetID, tableID, bucketName, "UTF-16BE"), - }, - }, - }) -} - -func TestAccBigQueryTable_RangePartitioning(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableRangePartitioning(datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_PrimaryKey(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTablePrimaryKey(datasetID, tableID), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_ForeignKey(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", 
acctest.RandString(t, 10)) - tableID_pk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID_fk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableForeignKeys(projectID, datasetID, tableID_pk, tableID_fk), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_updateTableConstraints(t *testing.T) { - t.Parallel() - resourceName := "google_bigquery_table.test" - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID_pk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID_fk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableForeignKeys(projectID, datasetID, tableID_pk, tableID_fk), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableTableConstraintsUpdate(projectID, datasetID, tableID_pk, tableID_fk), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_View(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 
10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithView(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_updateView(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithView(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithNewSqlView(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_WithViewAndSchema(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: 
testAccBigQueryTableWithViewAndSchema(datasetID, tableID, "table description1"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithViewAndSchema(datasetID, tableID, "table description2"), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) - queryNew := strings.ReplaceAll(query, "2019", "2020") - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, queryNew), - }, - { - ResourceName: 
"google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Update(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) - - enable_refresh := "false" - refresh_interval_ms := "3600000" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableWithMatViewDailyTimePartitioning(datasetID, tableID, materialized_viewID, enable_refresh, refresh_interval_ms, query), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", 
"last_modified_time", "deletion_protection"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_MaterializedView_NonIncremental_basic(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) - maxStaleness := "0-0 0 10:0:0" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithMatViewNonIncremental_basic(datasetID, tableID, materialized_viewID, query, maxStaleness), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "require_partition_filter", "time_partitioning.0.require_partition_filter"}, - }, - { - ResourceName: "google_bigquery_table.mv_test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "require_partition_filter", "time_partitioning.0.require_partition_filter"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_parquet(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquet(datasetID, tableID, bucketName, objectName), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_parquetOptions(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquetOptions(datasetID, tableID, bucketName, objectName, true, true), - }, - { - Config: testAccBigQueryTableFromGCSParquetOptions(datasetID, tableID, bucketName, objectName, false, false), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_iceberg(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSIceberg(datasetID, tableID, bucketName), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_parquetFileSetSpecType(t *testing.T) { - t.Parallel() - - bucketName := 
acctest.TestBucketName(t) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - parquetFileName := "test.parquet" - manifestName := fmt.Sprintf("tf_test_%s.manifest.json", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquetManifest(datasetID, tableID, bucketName, manifestName, parquetFileName), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_queryAcceleration(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - metadataCacheMode := "AUTOMATIC" - // including an optional field. Should work without specifiying. 
- // Has to follow google sql IntervalValue encoding - maxStaleness := "0-0 0 10:0:0" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSParquetWithQueryAcceleration(connectionID, datasetID, tableID, bucketName, objectName, metadataCacheMode, maxStaleness), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_objectTable(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - // including an optional field. Should work without specifiying. - // Has to follow google sql IntervalValue encoding - maxStaleness := "0-0 0 10:0:0" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSObjectTable(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), - }, - { - Config: testAccBigQueryTableFromGCSObjectTableMetadata(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), - }, - { - Config: testAccBigQueryTableFromGCSObjectTable(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseNameReference(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. - location := "US" - connection_id_reference := "google_bigquery_connection.test.name" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. 
- location := "US" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsCentral1LowerCase(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. - location := "us-central1" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsEast1(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. 
- location := "US-EAST1" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_EuropeWest8(t *testing.T) { - t.Parallel() - // Setup - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - // Feature Under Test. - location := "EUROPE-WEST8" - connection_id_reference := "google_bigquery_connection.test.id" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCS(datasetID, tableID, bucketName, objectName, TEST_CSV, "CSV", "\\\""), - Check: testAccCheckBigQueryExtData(t, "\""), - }, - { - Config: testAccBigQueryTableFromGCS(datasetID, tableID, bucketName, objectName, TEST_CSV, "CSV", ""), - Check: testAccCheckBigQueryExtData(t, ""), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchema_InvalidSchemas(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_NOT_JSON), - ExpectError: regexp.MustCompile("contains an invalid JSON"), - }, - { - Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_NOT_JSON_LIST), - ExpectError: regexp.MustCompile("\"schema\" is not a JSON array"), - }, - { - Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_JSON_LIST_WITH_NULL_ELEMENT), - ExpectError: regexp.MustCompile("\"schema\" contains a nil element"), - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchemaAndConnectionID_UpdateNoConnectionID(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := 
fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchema_UpdateToConnectionID(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: 
[]resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId2(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryExternalDataTable_CSV_WithSchema_UpdateAllowQuotedNewlines(t *testing.T) { - t.Parallel() - - bucketName := acctest.TestBucketName(t) - objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromGCSWithSchema_UpdatAllowQuotedNewlines(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_bigtable(t *testing.T) { - // bigtable instance does not use the shared HTTP client, this test creates an instance - acctest.SkipIfVcr(t) - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 8), - "project": envvar.GetTestProjectFromEnv(), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromBigtable(context), - }, - { - ResourceName: "google_bigquery_table.table", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_bigtable_options(t *testing.T) { - // bigtable instance does not use the shared HTTP client, this test creates an instance - acctest.SkipIfVcr(t) - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 8), - "project": envvar.GetTestProjectFromEnv(), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromBigtableOptions(context), - }, - { - ResourceName: "google_bigquery_table.table", - ImportState: true, - 
ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableFromBigtable(context), - }, - }, - }) -} - -func TestAccBigQueryDataTable_sheet(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableFromSheet(context), - }, - { - ResourceName: "google_bigquery_table.table", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_jsonEquivalency(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_jsonEq(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryTable_jsonEqModeRemoved(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryDataTable_canReorderParameters(t *testing.T) { - t.Parallel() - - datasetID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - // we don't run any checks because the resource will error out if - // it attempts to destroy/tear down. - Config: testAccBigQueryTable_jsonPreventDestroy(datasetID, tableID), - }, - { - Config: testAccBigQueryTable_jsonPreventDestroyOrderChanged(datasetID, tableID), - }, - { - Config: testAccBigQueryTable_jsonEq(datasetID, tableID), - }, - }, - }) -} - -func TestAccBigQueryDataTable_expandArray(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_arrayInitial(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryTable_arrayExpanded(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, - }, - }, - }) -} - -func TestAccBigQueryTable_allowDestroy(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", 
acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_noAllowDestroy(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection", "labels", "terraform_labels"}, - }, - { - Config: testAccBigQueryTable_noAllowDestroy(datasetID, tableID), - Destroy: true, - ExpectError: regexp.MustCompile("deletion_protection"), - }, - { - Config: testAccBigQueryTable_noAllowDestroyUpdated(datasetID, tableID), - }, - }, - }) -} - -func TestAccBigQueryTable_emptySchema(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTable_mimicCreateFromConsole(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTable_emptySchema(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithoutPolicyTagsToWithPolicyTags(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID 
:= envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchema(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToNoPolicyTag(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchema(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToEmptyPolicyTag(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 
10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchemaWithEmptyPolicyTags(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToEmptyPolicyTagNames(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - projectID := envvar.GetTestProjectFromEnv() - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - { - Config: testAccBigQueryTableBasicSchemaWithEmptyPolicyTagNames(datasetID, tableID), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func 
TestAccBigQueryTable_invalidSchemas(t *testing.T) { - t.Parallel() - // Pending VCR support in https://github.com/hashicorp/terraform-provider-google/issues/15427. - acctest.SkipIfVcr(t) - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_NOT_JSON), - ExpectError: regexp.MustCompile("contains an invalid JSON"), - }, - { - Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_NOT_JSON_LIST), - ExpectError: regexp.MustCompile("\"schema\" is not a JSON array"), - }, - { - Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_JSON_LIST_WITH_NULL_ELEMENT), - ExpectError: regexp.MustCompile("\"schema\" contains a nil element"), - }, - }, - }) -} - -func TestAccBigQueryTable_TableReplicationInfo_ConflictsWithView(t *testing.T) { - t.Parallel() - - datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithReplicationInfoAndView(datasetID, tableID), - ExpectError: regexp.MustCompile("Schema, view, or materialized view cannot be specified when table replication info is present"), - }, - }, - }) -} - -func TestAccBigQueryTable_TableReplicationInfo_WithoutReplicationInterval(t *testing.T) { - t.Parallel() - - projectID := envvar.GetTestProjectFromEnv() - - 
sourceDatasetID := fmt.Sprintf("tf_test_source_dataset_%s", acctest.RandString(t, 10)) - sourceTableID := fmt.Sprintf("tf_test_source_table_%s", acctest.RandString(t, 10)) - sourceMVID := fmt.Sprintf("tf_test_source_mv_%s", acctest.RandString(t, 10)) - replicaDatasetID := fmt.Sprintf("tf_test_replica_dataset_%s", acctest.RandString(t, 10)) - replicaMVID := fmt.Sprintf("tf_test_replica_mv_%s", acctest.RandString(t, 10)) - sourceMVJobID := fmt.Sprintf("tf_test_create_source_mv_job_%s", acctest.RandString(t, 10)) - dropMVJobID := fmt.Sprintf("tf_test_drop_source_mv_job_%s", acctest.RandString(t, 10)) - replicationIntervalExpr := "" - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - ExternalProviders: map[string]resource.ExternalProvider{ - "time": {}, - }, - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithReplicationInfo(projectID, sourceDatasetID, sourceTableID, sourceMVID, replicaDatasetID, replicaMVID, sourceMVJobID, dropMVJobID, replicationIntervalExpr), - }, - { - ResourceName: "google_bigquery_table.replica_mv", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -func TestAccBigQueryTable_TableReplicationInfo_WithReplicationInterval(t *testing.T) { - t.Parallel() - - projectID := envvar.GetTestProjectFromEnv() - - sourceDatasetID := fmt.Sprintf("tf_test_source_dataset_%s", acctest.RandString(t, 10)) - sourceTableID := fmt.Sprintf("tf_test_source_table_%s", acctest.RandString(t, 10)) - sourceMVID := fmt.Sprintf("tf_test_source_mv_%s", acctest.RandString(t, 10)) - replicaDatasetID := fmt.Sprintf("tf_test_replica_dataset_%s", acctest.RandString(t, 10)) - replicaMVID := fmt.Sprintf("tf_test_replica_mv_%s", acctest.RandString(t, 10)) - sourceMVJobID := fmt.Sprintf("tf_test_create_source_mv_job_%s", 
acctest.RandString(t, 10)) - dropMVJobID := fmt.Sprintf("tf_test_drop_source_mv_job_%s", acctest.RandString(t, 10)) - replicationIntervalExpr := "replication_interval_ms = 600000" - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - ExternalProviders: map[string]resource.ExternalProvider{ - "time": {}, - }, - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithReplicationInfo(projectID, sourceDatasetID, sourceTableID, sourceMVID, replicaDatasetID, replicaMVID, sourceMVJobID, dropMVJobID, replicationIntervalExpr), - }, - { - ResourceName: "google_bigquery_table.replica_mv", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection"}, - }, - }, - }) -} - -{{ if ne $.TargetVersionName `ga` -}} -func TestAccBigQueryTable_ResourceTags(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "project_id": envvar.GetTestProjectFromEnv(), - "dataset_id": fmt.Sprintf("tf_test_dataset_%s", acctest.RandString(t, 10)), - "table_id" : fmt.Sprintf("tf_test_table_%s", acctest.RandString(t, 10)), - "tag_key_name1": fmt.Sprintf("tf_test_tag_key1_%s", acctest.RandString(t, 10)), - "tag_value_name1": fmt.Sprintf("tf_test_tag_value1_%s", acctest.RandString(t, 10)), - "tag_key_name2": fmt.Sprintf("tf_test_tag_key2_%s", acctest.RandString(t, 10)), - "tag_value_name2": fmt.Sprintf("tf_test_tag_value2_%s", acctest.RandString(t, 10)), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccBigQueryTableWithResourceTags(context), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, - }, - { - Config: testAccBigQueryTableWithResourceTagsUpdate(context), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, - }, - // testAccBigQueryTableWithResourceTagsDestroy must be called at the end of this test to clear the resource tag bindings of the table before deletion. - { - Config: testAccBigQueryTableWithResourceTagsDestroy(context), - }, - { - ResourceName: "google_bigquery_table.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, - }, - }, - }) -} - -{{ end }} -func testAccCheckBigQueryExtData(t *testing.T, expectedQuoteChar string) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_bigquery_table" { - continue - } - - config := acctest.GoogleProviderConfig(t) - dataset := rs.Primary.Attributes["dataset_id"] - table := rs.Primary.Attributes["table_id"] - res, err := config.NewBigQueryClient(config.UserAgent).Tables.Get(config.Project, dataset, table).Do() - if err != nil { - return err - } - - if res.Type != "EXTERNAL" { - return fmt.Errorf("Table \"%s.%s\" is of type \"%s\", expected EXTERNAL.", dataset, table, res.Type) - } - edc := res.ExternalDataConfiguration - cvsOpts := edc.CsvOptions - if cvsOpts == nil || *cvsOpts.Quote != expectedQuoteChar { - return fmt.Errorf("Table \"%s.%s\" quote should be '%s' but was '%s'", dataset, table, expectedQuoteChar, *cvsOpts.Quote) - } - } - return nil - } -} - -func testAccCheckBigQueryTableDestroyProducer(t *testing.T) func(s *terraform.State) error { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_bigquery_table" { - 
continue - } - - config := acctest.GoogleProviderConfig(t) - _, err := config.NewBigQueryClient(config.UserAgent).Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["table_id"]).Do() - if err == nil { - return fmt.Errorf("Table still present") - } - } - - return nil - } -} - -func testAccBigQueryTableBasicSchema(datasetID, tableID string) string { - return fmt.Sprintf(` -resource "google_bigquery_dataset" "test" { - dataset_id = "%s" -} - -resource "google_bigquery_table" "test" { - deletion_protection = false - table_id = "%s" - dataset_id = google_bigquery_dataset.test.dataset_id - - schema = < 0 { + ni.AliasIpRanges = commonAliasIpRanges + } op, err := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() if err != nil { return errwrap.Wrapf("Error removing alias_ip_range: {{"{{"}}err{{"}}"}}", err) @@ -2717,6 +2761,10 @@ func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceDat disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok && v != "" { + disk.Interface = v.(string) + } + keyValue, keyOk := diskConfig["disk_encryption_key_raw"] if keyOk { if keyValue != "" { @@ -2918,6 +2966,10 @@ func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, projec disk.DeviceName = v.(string) } + if v, ok := d.GetOk("boot_disk.0.interface"); ok { + disk.Interface = v.(string) + } + if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { if v != "" { disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ @@ -3018,6 +3070,9 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config // originally specified to avoid diffs. 
"disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), } + if _,ok := d.GetOk("boot_disk.0.interface"); ok { + result["interface"] = disk.Interface + } diskDetails, err := getDisk(disk.Source, d, config) if err != nil { @@ -3174,3 +3229,20 @@ func isEmptyServiceAccountBlock(d *schema.ResourceData) bool { } return false } + +// Alias ip ranges cannot be removed and created at the same time. This checks if there are any unchanged alias ip ranges +// to be kept in between the PATCH operations on Network Interface +func CheckForCommonAliasIp(old, new *compute.NetworkInterface) []*compute.AliasIpRange { + newAliasIpMap := make(map[string]bool) + for _, ipRange := range new.AliasIpRanges { + newAliasIpMap[ipRange.IpCidrRange] = true + } + + resultAliasIpRanges := make([]*compute.AliasIpRange, 0) + for _, val := range old.AliasIpRanges { + if newAliasIpMap[val.IpCidrRange] { + resultAliasIpRanges = append(resultAliasIpRanges, val) + } + } + return resultAliasIpRanges +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl index 2ab1d486b783..3f1a669417c7 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl @@ -46,17 +46,6 @@ func computeInstanceFromMachineImageSchema() map[string]*schema.Schema { s[field].Optional = true } - // schema.SchemaConfigModeAttr allows these fields to be removed in Terraform 0.12. - // Passing field_name = [] in this mode differentiates between an intentionally empty - // block vs an ignored computed block. 
- nic := s["network_interface"].Elem.(*schema.Resource) - nic.Schema["alias_ip_range"].ConfigMode = schema.SchemaConfigModeAttr - nic.Schema["access_config"].ConfigMode = schema.SchemaConfigModeAttr - - for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { - s[field].ConfigMode = schema.SchemaConfigModeAttr - } - recurseOnSchema(s, func(field *schema.Schema) { // We don't want to accidentally use default values to override the instance // machine image, so remove defaults. diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl index d01ec9aa745a..fbcc2f25170d 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl @@ -120,7 +120,7 @@ func TestAccComputeInstanceFromMachineImageWithOverride_localSsdRecoveryTimeout( var expectedLocalSsdRecoveryTimeout = compute.Duration{} expectedLocalSsdRecoveryTimeout.Nanos = 0 expectedLocalSsdRecoveryTimeout.Seconds = 7200 - + acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), @@ -224,6 +224,69 @@ func TestAccComputeInstanceFromMachineImage_diffProject(t *testing.T) { }) } +func TestAccComputeInstanceFromMachineImage_confidentialInstanceConfigMain(t *testing.T) { + t.Parallel() + + var instance compute.Instance + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnable(fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10))), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar1", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar1", "machine_type", "n2d-standard-2"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar1", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, ""), + ), + }, + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnableSev(fmt.Sprintf("tf-test-sev0-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-sev0-generated-%s", acctest.RandString(t, 10)), "SEV"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar2", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar2", "machine_type", "n2d-standard-2"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar2", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, "SEV"), + ), + }, + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigNoEnableSev(fmt.Sprintf("tf-test-sev1-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-sev1-generated-%s", acctest.RandString(t, 10)), "SEV"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar3", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar3", "min_cpu_platform", "AMD Milan"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar3", 
"scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "SEV"), + ), + }, + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigSevSnp(fmt.Sprintf("tf-test-sev-snp-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-sev-snp-generated-%s", acctest.RandString(t, 10)), "SEV_SNP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar4", &instance), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar4", "min_cpu_platform", "AMD Milan"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar4", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "SEV_SNP"), + ), + }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigTdx(fmt.Sprintf("tf-test-tdx-%s", acctest.RandString(t, 10)), fmt.Sprintf("tf-test-tdx-generated-%s", acctest.RandString(t, 10)), "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_machine_image.foobar5", &instance), + // Check that fields were set based on the template + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar5", "machine_type", "c3-standard-4"), + resource.TestCheckResourceAttr("google_compute_instance_from_machine_image.foobar5", "scheduling.0.on_host_maintenance", "TERMINATE"), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "TDX"), + ), + + }, + {{- end }} + }, + }) +} + func testAccCheckComputeInstanceFromMachineImageDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) @@ -298,6 +361,298 @@ resource "google_compute_instance_from_machine_image" "foobar" { 
`, instance, instance, newInstance) } +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnable(instance string, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm1" { + provider = google-beta + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + confidential_instance_config { + enable_confidential_compute = true + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar1" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm1.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar1" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar1.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + enable_confidential_compute = true + } + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, instance, newInstance) +} + +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigEnableSev(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm2" { + provider = google-beta + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + confidential_instance_config { + enable_confidential_compute = true + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar2" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm2.self_link +} + 
+resource "google_compute_instance_from_machine_image" "foobar2" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar2.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + enable_confidential_compute = true + confidential_instance_type = %q + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} + +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigNoEnableSev(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm3" { + provider = google-beta + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + min_cpu_platform = "AMD Milan" + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } +} + +resource "google_compute_machine_image" "foobar3" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm3.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar3" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar3.self_link + + labels = { + my_key = "my_value" + } + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} + +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigSevSnp(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm4" { + provider = google-beta + + 
boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2004-lts" + } + } + + name = "%s" + machine_type = "n2d-standard-2" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + min_cpu_platform = "AMD Milan" + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar4" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm4.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar4" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar4.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceFromMachineImage_ConfidentialInstanceConfigTdx(instance string, newInstance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm5" { + provider = google-beta + + boot_disk { + initialize_params { + image = "tdx-guest-images/ubuntu-2204-jammy-v20240701" + } + } + + name = "%s" + machine_type = "c3-standard-4" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } +} + +resource "google_compute_machine_image" "foobar5" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm5.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar5" { + provider = google-beta + 
name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar5.self_link + + labels = { + my_key = "my_value" + } + confidential_instance_config { + confidential_instance_type = %q + } + scheduling { + on_host_maintenance = "TERMINATE" + } +} +`, instance, confidentialInstanceType, instance, newInstance, confidentialInstanceType) +} +{{- end }} + {{ if ne $.TargetVersionName `ga` -}} func testAccComputeInstanceFromMachineImage_maxRunDuration(instance, newInstance string) string { return fmt.Sprintf(` @@ -501,7 +856,7 @@ resource "google_compute_instance" "vm" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key = "value" } @@ -530,7 +885,7 @@ resource "google_compute_instance_from_machine_image" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl index fae3529c0ea6..6a691b8317a4 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl @@ -50,17 +50,6 @@ func computeInstanceFromTemplateSchema() map[string]*schema.Schema { s[field].Optional = true } - // schema.SchemaConfigModeAttr allows these fields to be removed in Terraform 0.12. - // Passing field_name = [] in this mode differentiates between an intentionally empty - // block vs an ignored computed block. 
- nic := s["network_interface"].Elem.(*schema.Resource) - nic.Schema["alias_ip_range"].ConfigMode = schema.SchemaConfigModeAttr - nic.Schema["access_config"].ConfigMode = schema.SchemaConfigModeAttr - - for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { - s[field].ConfigMode = schema.SchemaConfigModeAttr - } - // Remove deprecated/removed fields that are never d.Set. We can't // programmatically remove all of them, because some of them still have d.Set // calls. diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl index 27eb5b8f6030..7b32689d6a2f 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl @@ -377,49 +377,6 @@ func TestAccComputeInstanceFromTemplate_overrideScheduling(t *testing.T) { }) } -func TestAccComputeInstanceFromTemplate_012_removableFields(t *testing.T) { - t.Parallel() - - var instance compute.Instance - instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) - templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) - resourceName := "google_compute_instance_from_template.inst" - - // First config is a basic instance from template, second tests the empty list syntax - config1 := testAccComputeInstanceFromTemplate_012_removableFieldsTpl(templateName) + - testAccComputeInstanceFromTemplate_012_removableFields1(instanceName) - config2 := testAccComputeInstanceFromTemplate_012_removableFieldsTpl(templateName) + - testAccComputeInstanceFromTemplate_012_removableFields2(instanceName) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - 
CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: config1, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists(t, resourceName, &instance), - - resource.TestCheckResourceAttr(resourceName, "service_account.#", "1"), - resource.TestCheckResourceAttr(resourceName, "service_account.0.scopes.#", "3"), - ), - }, - { - Config: config2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeInstanceExists(t, resourceName, &instance), - - // Check that fields were able to be removed - resource.TestCheckResourceAttr(resourceName, "scratch_disk.#", "0"), - resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "0"), - resource.TestCheckResourceAttr(resourceName, "network_interface.0.alias_ip_range.#", "0"), - ), - }, - }, - }) -} - func TestAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(t *testing.T) { var instance compute.Instance instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) @@ -463,6 +420,71 @@ func testAccCheckComputeInstanceFromTemplateDestroyProducer(t *testing.T) func(s } } +func TestAccComputeInstanceFromTemplate_confidentialInstanceConfigMain(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instance2 compute.Instance + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_confidentialInstanceConfigEnable( + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "SEV"), + 
Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst1", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, "SEV"), + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst2", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, true, ""), + ), + }, + { + Config: testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigSevSnp( + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "SEV_SNP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst1", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "SEV_SNP"), + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst2", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "SEV_SNP"), + ), + }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigTdx( + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst1", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "TDX"), + 
testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.inst2", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "TDX"), + ), + }, + {{- end }} + }, + }) +} + func testAccComputeInstanceFromTemplate_basic(instance, template string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -878,7 +900,7 @@ resource "google_compute_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 @@ -964,7 +986,7 @@ resource "google_compute_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" } @@ -989,7 +1011,7 @@ resource "google_compute_instance_from_template" "foobar" { automatic_restart = false } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 @@ -1430,8 +1452,7 @@ resource "google_compute_instance_from_template" "inst" { `, templateDisk, template, instance) } -func testAccComputeInstanceFromTemplate_012_removableFieldsTpl(template string) string { - +func testAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(instance, template string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { family = "debian-11" @@ -1445,7 +1466,6 @@ resource "google_compute_instance_template" "foobar" { disk { source_image = data.google_compute_image.my_image.self_link auto_delete = true - disk_size_gb = 20 boot = true } @@ -1454,75 +1474,242 @@ resource "google_compute_instance_template" "foobar" { } metadata = { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] + startup-script = "#!/bin/bash\necho Hello" } can_ip_forward = true } -`, template) -} -func 
testAccComputeInstanceFromTemplate_012_removableFields1(instance string) string { - return fmt.Sprintf(` resource "google_compute_instance_from_template" "inst" { name = "%s" zone = "us-central1-a" - allow_stopping_for_update = true - source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + metadata = { + startup-script = "" + } } -`, instance) +`, template, instance) } -func testAccComputeInstanceFromTemplate_012_removableFields2(instance string) string { +func testAccComputeInstanceFromTemplate_confidentialInstanceConfigEnable(templateDisk string, image string, template string, instance string, template2 string, instance2 string, confidentialInstanceType string) string { return fmt.Sprintf(` -resource "google_compute_instance_from_template" "inst" { +data "google_compute_image" "my_image1" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_disk" "foobar1" { + name = "%s" + image = data.google_compute_image.my_image1.self_link + size = 10 + type = "pd-standard" + zone = "us-central1-a" +} + +resource "google_compute_image" "foobar1" { + name = "%s" + source_disk = google_compute_disk.foobar1.self_link +} + +resource "google_compute_instance_template" "foobar1" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = true + confidential_instance_type = %q + } +} + +resource "google_compute_instance_from_template" "inst1" { name = "%s" zone = "us-central1-a" - allow_stopping_for_update = true + source_instance_template = google_compute_instance_template.foobar1.self_link +} - source_instance_template = google_compute_instance_template.foobar.self_link +resource 
"google_compute_instance_template" "foobar2" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = true + } +} + +resource "google_compute_instance_from_template" "inst2" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar2.self_link +} +`, templateDisk, image, template, confidentialInstanceType, instance, template2, instance2) +} + +func testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigSevSnp(templateDisk string, image string, template string, instance string, template2 string, instance2 string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image1" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_disk" "foobar1" { + name = "%s" + image = data.google_compute_image.my_image1.self_link + size = 10 + type = "pd-standard" + zone = "us-central1-a" +} + +resource "google_compute_image" "foobar1" { + name = "%s" + source_disk = google_compute_disk.foobar1.self_link +} + +resource "google_compute_instance_template" "foobar3" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true + } - // Overrides network_interface { - alias_ip_range = [] + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } +} + +resource "google_compute_instance_from_template" "inst1" { + name = "%s" + zone = 
"us-central1-a" + + source_instance_template = google_compute_instance_template.foobar3.self_link +} + +resource "google_compute_instance_template" "foobar4" { + name = "%s" + machine_type = "n2d-standard-2" + + disk { + source_image = google_compute_image.foobar1.name + auto_delete = true + boot = true } - service_account = [] + network_interface { + network = "default" + } - scratch_disk = [] + metadata = { + foo = "bar" + } - attached_disk = [] + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } - timeouts { - create = "10m" - update = "10m" + confidential_instance_config { + confidential_instance_type = %q } } -`, instance) + +resource "google_compute_instance_from_template" "inst2" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar4.self_link +} +`, templateDisk, image, template, confidentialInstanceType, instance, template2, confidentialInstanceType, instance2) } -func testAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(instance, template string) string { +func testAccComputeInstanceFromTemplate_confidentialInstanceConfigNoConfigTdx(templateDisk string, image string, template string, instance string, template2 string, instance2 string, confidentialInstanceType string) string { return fmt.Sprintf(` -data "google_compute_image" "my_image" { - family = "debian-11" - project = "debian-cloud" +data "google_compute_image" "my_image2" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" } -resource "google_compute_instance_template" "foobar" { +resource "google_compute_disk" "foobar2" { + name = "%s" + image = data.google_compute_image.my_image2.self_link + size = 10 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_image" "foobar2" { + name = "%s" + source_disk = google_compute_disk.foobar2.self_link +} + +resource "google_compute_instance_template" "foobar5" { name = "%s" - machine_type = "e2-medium" + machine_type = 
"c3-standard-4" disk { - source_image = data.google_compute_image.my_image.self_link + source_image = google_compute_image.foobar2.name auto_delete = true boot = true + disk_type = "pd-balanced" + type = "PERSISTENT" } network_interface { @@ -1530,22 +1717,142 @@ resource "google_compute_instance_template" "foobar" { } metadata = { - startup-script = "#!/bin/bash\necho Hello" + foo = "bar" } - can_ip_forward = true + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } } -resource "google_compute_instance_from_template" "inst" { +resource "google_compute_instance_from_template" "inst1" { name = "%s" zone = "us-central1-a" - source_instance_template = google_compute_instance_template.foobar.self_link + source_instance_template = google_compute_instance_template.foobar5.self_link +} + +resource "google_compute_instance_template" "foobar6" { + name = "%s" + machine_type = "c3-standard-4" + + disk { + source_image = google_compute_image.foobar2.name + auto_delete = true + boot = true + disk_type = "pd-balanced" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } - // Overrides metadata = { - startup-script = "" + foo = "bar" + } + + scheduling { + automatic_restart = false + on_host_maintenance = "TERMINATE" + } + + confidential_instance_config { + confidential_instance_type = %q } } -`, template, instance) + +resource "google_compute_instance_from_template" "inst2" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar6.self_link +} +`, templateDisk, image, template, confidentialInstanceType, instance, template2, confidentialInstanceType, instance2) +} + +func TestAccComputeInstanceFromTemplateWithOverride_interface(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + 
templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplateWithOverride_interface(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "boot_disk.0.interface", "SCSI"), + ), + }, + }, + }) +} + +func testAccComputeInstanceFromTemplateWithOverride_interface(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobarboot" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "foobarattach" { + name = "%s" + size = 100 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobarboot.name + auto_delete = false + boot = true + } + + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + attached_disk { + source = google_compute_disk.foobarattach.name + } + // Overrides + boot_disk { + interface = "SCSI" + source = google_compute_disk.foobarboot.name + } +} +`, template, instance, template, instance) } diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go index b172d6e00f44..7567a28d28e8 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go @@ -12,7 +12,7 @@ func TestAccComputeInstanceSettings_update(t *testing.T) { t.Parallel() context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), + "random_suffix": acctest.RandString(t, 10), } acctest.VcrTest(t, resource.TestCase{ diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl index 7914f0c9dafe..bb3b7d8521c2 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl @@ -95,14 +95,14 @@ func ResourceComputeInstanceTemplate() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, + Description: `Creates a unique name beginning with the specified prefix. Conflicts with name. Max length is 54 characters. Prefixes with lengths longer than 37 characters will use a shortened UUID that will be more prone to collisions.`, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - // uuid is 26 characters, limit the prefix to 37. + // shortened uuid is 9 characters, limit the prefix to 55. 
value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -775,7 +775,7 @@ be from 0 to 999,999,999 inclusive.`, recovery of the Local Ssd state is attempted. Its value should be in between 0 and 168 hours with hour granularity and the default value being 1 hour.`, - + Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "seconds": { @@ -910,9 +910,10 @@ be from 0 to 999,999,999 inclusive.`, Optional: true, ForceNew: true, Description: ` - Specifies which confidential computing technology to use. - This could be one of the following values: SEV, SEV_SNP. - If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + The confidential computing technology the instance uses. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required. TDX is only available in beta.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, }, @@ -1002,7 +1003,7 @@ be from 0 to 999,999,999 inclusive.`, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A set of key/value label pairs to assign to instances created from this template. - + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
Please refer to the field 'effective_labels' for all of the labels present on the resource.`, }, @@ -1432,7 +1433,12 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + itName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + itName = id.PrefixedUniqueId(prefix) + } } else { itName = id.UniqueId() } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl index ac553ff83357..da3487257612 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -804,6 +804,15 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceTemplateConfidentialInstanceConfigEnableTdx(acctest.RandString(t, 10), "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar5", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, false, "TDX"), + ), + }, + {{- end }} }, }) } @@ -865,6 +874,45 @@ func TestAccComputeInstanceTemplate_invalidDiskType(t *testing.T) { }) } +func TestAccComputeInstanceTemplate_withNamePrefix(t *testing.T) { + t.Parallel() + + // 8 + 46 = 54 which is the valid max + normalPrefix := "tf-test-" + fmt.Sprintf("%046s", "") + reducedSuffixPrefix := "tf-test-" + fmt.Sprintf("%029s", "") + invalidPrefix := 
"tf-test-" + fmt.Sprintf("%047s", "") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_withNamePrefix(normalPrefix), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + { + Config: testAccComputeInstanceTemplate_withNamePrefix(invalidPrefix), + PlanOnly: true, + ExpectError: regexp.MustCompile("cannot be longer than 54 characters"), + }, + { + Config: testAccComputeInstanceTemplate_withNamePrefix(reducedSuffixPrefix), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + func TestAccComputeInstanceTemplate_withScratchDisk(t *testing.T) { t.Parallel() @@ -1043,7 +1091,7 @@ func TestAccComputeInstanceTemplate_managedEnvoy(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, }, - }, + }, }) } @@ -1621,7 +1669,7 @@ func testAccCheckComputeInstanceTemplateExistsInProject(t *testing.T, n, p strin found, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Get( p, templateName).View("FULL").Do() {{- end }} - + if err != nil { return err } @@ -2599,6 +2647,28 @@ resource "google_compute_instance_template" "foobar" { `, suffix, suffix) } +func testAccComputeInstanceTemplate_withNamePrefix(prefix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-12" + project = "debian-cloud" +} +resource "google_compute_instance_template" "foobar" { + name_prefix = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.name + auto_delete = 
true + boot = true + } + network_interface { + network = "default" + } +} +`, prefix) +} + func testAccComputeInstanceTemplate_with375GbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3442,6 +3512,41 @@ resource "google_compute_instance_template" "foobar4" { `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplateConfidentialInstanceConfigEnableTdx(suffix string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image3" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" +} + +resource "google_compute_instance_template" "foobar5" { + name = "tf-test-instance5-template-%s" + machine_type = "c3-standard-4" + + disk { + source_image = data.google_compute_image.my_image3.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix, confidentialInstanceType) +} +{{- end }} + func testAccComputeInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3976,7 +4081,7 @@ resource "google_compute_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl index 7308ff48735d..1645c003fed7 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -20,6 
+20,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" @@ -101,6 +102,60 @@ func TestMinCpuPlatformDiffSuppress(t *testing.T) { } } +func TestCheckForCommonAliasIp(t *testing.T) { + type testCase struct { + old, new []*compute.AliasIpRange + expected []*compute.AliasIpRange + } + + testCases := []testCase{ + { + old: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + {IpCidrRange: "10.0.1.0/24"}, + }, + new: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + {IpCidrRange: "10.0.2.0/24"}, + }, + expected: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + }, + }, + { + old: []*compute.AliasIpRange{ + {IpCidrRange: "172.16.0.0/24"}, + {IpCidrRange: "10.0.1.0/24"}, + }, + new: []*compute.AliasIpRange{ + {IpCidrRange: "172.16.0.0/24"}, + {IpCidrRange: "10.0.2.0/24"}, + }, + expected: []*compute.AliasIpRange{ + {IpCidrRange: "172.16.0.0/24"}, + }, + }, + { + old: []*compute.AliasIpRange{ + {IpCidrRange: "10.0.0.0/24"}, + {IpCidrRange: "10.0.1.0/24"}, + }, + new: []*compute.AliasIpRange{ + {IpCidrRange: "192.168.0.0/24"}, + {IpCidrRange: "172.17.0.0/24"}, + }, + expected: []*compute.AliasIpRange{}, + }, + } + + for _, tc := range testCases { + oldInterface := &compute.NetworkInterface{AliasIpRanges: tc.old} + newInterface := &compute.NetworkInterface{AliasIpRanges: tc.new} + result := tpgcompute.CheckForCommonAliasIp(oldInterface, newInterface) + assert.Equal(t, tc.expected, result) + } +} + func computeInstanceImportStep(zone, instanceName string, additionalImportIgnores []string) resource.TestStep { // metadata is only read into state if set in the config // importing doesn't know whether metadata.startup_script vs 
metadata_startup_script is set in the config, @@ -1820,7 +1875,7 @@ func TestAccComputeInstance_secondaryAliasIpRange(t *testing.T) { testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-secondary", "172.16.0.0/24"), ), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), { Config: testAccComputeInstance_secondaryAliasIpRangeUpdate(networkName, subnetName, instanceName), Check: resource.ComposeTestCheckFunc( @@ -1828,7 +1883,51 @@ func TestAccComputeInstance_secondaryAliasIpRange(t *testing.T) { testAccCheckComputeInstanceHasAliasIpRange(&instance, "", "10.0.1.0/24"), ), }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), + }, + }) +} + +func TestAccComputeInstance_aliasIpRangeCommonAddresses(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_secondaryAliasIpRangeTwoAliasIps(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, 
"google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.1.0/24"), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.2.0/24"), + ), + }, computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddress(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.1.0/24"), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.3.0/24"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddressDifferentRanges(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-secondary", "172.16.1.0/24"), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-tertiary", "10.1.3.0/24"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), }, }) } @@ -1927,11 +2026,22 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "SEV_SNP"), ), }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeInstanceConfidentialInstanceConfigEnableTdx(instanceName, "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, 
"google_compute_instance.foobar5", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "TDX"), + ), + }, + {{- end }} }, }) } func TestAccComputeInstance_confidentialHyperDiskBootDisk(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() kms := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-central1", "tf-bootstrap-hyperdisk-key1") @@ -2563,15 +2673,72 @@ func TestAccComputeInstance_subnetworkUpdate(t *testing.T) { { Config: testAccComputeInstance_subnetworkUpdate(suffix, instanceName), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update", "network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), { Config: testAccComputeInstance_subnetworkUpdateTwo(suffix, instanceName), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update", "network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", "network_interface.0.alias_ip_range.1.subnetwork_range_name"}), { Config: testAccComputeInstance_subnetworkUpdate(suffix, instanceName), }, - computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update", "network_interface.0.alias_ip_range.0.ip_cidr_range", "network_interface.0.alias_ip_range.0.subnetwork_range_name", "network_interface.0.alias_ip_range.1.ip_cidr_range", 
"network_interface.0.alias_ip_range.1.subnetwork_range_name"}), + }, + }) +} + +func TestAccComputeInstance_subnetworkProjectMustMatchError(t *testing.T) { + t.Parallel() + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_subnetworkProjectExpectError(suffix, instanceName), + ExpectError: regexp.MustCompile("must match subnetwork_project"), + }, + }, + }) +} + +func TestAccComputeInstance_networkIpUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkIpUpdate(suffix, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkIP(&instance, "10.3.0.3"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_networkIpUpdateByHand(suffix, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkIP(&instance, "10.3.0.4"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_networkIpUpdateWithComputeAddress(suffix, 
instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkIP(&instance, "10.3.0.5"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), }, }) } @@ -3964,7 +4131,7 @@ func testAccCheckComputeInstanceScratchDisk(instance *compute.Instance, interfac i, deviceName, disk.DeviceName) } } - + i++ } } @@ -7177,6 +7344,162 @@ resource "google_compute_instance" "foobar" { `, network, subnet, instance) } +func testAccComputeInstance_secondaryAliasIpRangeTwoAliasIps(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.1.0/24" + } + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.2.0/24" + } + } +} +`, network, subnet, instance) +} + +func testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddress(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" 
+ project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.1.0/24" + } + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.3.0/24" + } + } +} +`, network, subnet, instance) +} + +func testAccComputeInstance_secondaryAliasIpRangeUpdateWithCommonAddressDifferentRanges(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + 
network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + alias_ip_range { + subnetwork_range_name = "inst-test-secondary" + ip_cidr_range = "172.16.1.0/24" + } + alias_ip_range { + subnetwork_range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.3.0/24" + } + } +} +`, network, subnet, instance) +} + func testAccComputeInstance_hostname(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -7839,6 +8162,42 @@ resource "google_compute_instance" "foobar6" { `, instance, minCpuPlatform, confidentialInstanceType, instance, minCpuPlatform, confidentialInstanceType) } +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceConfidentialInstanceConfigEnableTdx(instance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image3" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" +} + +resource "google_compute_instance" "foobar5" { + name = "%s" + machine_type = "c3-standard-4" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image3.self_link + } + } + + network_interface { + network = "default" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, instance, confidentialInstanceType) +} +{{- end }} + func testAccComputeInstance_attributionLabelCreate(instance, add, strategy string) string { return fmt.Sprintf(` provider "google" { @@ -8352,6 +8711,183 @@ func testAccComputeInstance_subnetworkUpdateTwo(suffix, instance string) string `, suffix, suffix, suffix, suffix, instance) } +func testAccComputeInstance_subnetworkProjectExpectError(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + 
auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + subnetwork_project = "placeholder" + } + } +`, suffix, suffix, instance) +} + +func testAccComputeInstance_networkIpUpdate(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_address" "inst-test-address" { + name = "tf-test-compute-address-%s" + region = "us-east1" + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + address_type = "INTERNAL" + address = "10.3.0.5" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + network_ip = "10.3.0.3" + } + } +`, suffix, suffix, suffix, instance) +} + +func testAccComputeInstance_networkIpUpdateByHand(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" 
+ project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_address" "inst-test-address" { + name = "tf-test-compute-address-%s" + region = "us-east1" + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + address_type = "INTERNAL" + address = "10.3.0.5" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + network_ip = "10.3.0.4" + } + } +`, suffix, suffix, suffix, instance) +} + +func testAccComputeInstance_networkIpUpdateWithComputeAddress(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + } + + resource "google_compute_address" "inst-test-address" { + name = "tf-test-compute-address-%s" + region = "us-east1" + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + address_type = "INTERNAL" + address = "10.3.0.5" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + 
} + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + network_ip = google_compute_address.inst-test-address.address + } + } +`, suffix, suffix, suffix, instance) +} + func testAccComputeInstance_queueCountSet(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -8640,7 +9176,7 @@ resource "google_compute_instance" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 @@ -8994,7 +9530,7 @@ resource "google_compute_subnetwork" "subnet2" { stack_type = "IPV4_ONLY" network = google_compute_network.net2.id } - + resource "google_compute_subnetwork" "subnet-ipv62" { region = "europe-west1" name = "tf-test-subnet-ip62-%s" @@ -9009,7 +9545,7 @@ resource "google_compute_address" "normal-address2" { region = "europe-west1" name = "tf-test-addr-normal2-%s" } - + resource "google_compute_address" "ipv6-address2" { region = "europe-west1" name = "tf-test-addr-ipv62-%s" @@ -9166,7 +9702,7 @@ resource "google_compute_subnetwork" "subnet2" { stack_type = "IPV4_ONLY" network = google_compute_network.net2.id } - + resource "google_compute_subnetwork" "subnet-ipv62" { region = "europe-west1" name = "tf-test-subnet-ip62-%s" @@ -9181,7 +9717,7 @@ resource "google_compute_address" "normal-address2" { region = "europe-west1" name = "tf-test-addr-normal2-%s" } - + resource "google_compute_address" "ipv6-address2" { region = "europe-west1" name = "tf-test-addr-ipv62-%s" @@ -9722,6 +10258,8 @@ resource "google_compute_instance" "foobar" { } func TestAccComputeInstance_bootDisk_storagePoolSpecified(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() instanceName := fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) @@ -9817,3 +10355,79 @@ resource "google_compute_instance" "foobar" { } `, instanceName, zone, storagePoolUrl) } + +func 
TestAccComputeInstance_bootAndAttachedDisk_interface(t *testing.T) { + t.Parallel() + + instanceName1 := fmt.Sprintf("tf-test-vm1-%s", acctest.RandString(t, 10)) + diskName1 := fmt.Sprintf("tf-test-disk1-%s", acctest.RandString(t, 10)) + instanceName2 := fmt.Sprintf("tf-test-vm2-%s", acctest.RandString(t, 10)) + diskName2 := fmt.Sprintf("tf-test-disk2-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_bootAndAttachedDisk_interface(instanceName1, diskName1, envvar.GetTestZoneFromEnv(), "h3-standard-88", "NVME", false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance.foobar", "boot_disk.0.interface", "NVME"), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "machine_type", "h3-standard-88"), + ), + }, + //computeInstanceImportStep("us-central1-a", instanceName1, []string{"desired_status","allow_stopping_for_update"}), + { + Config: testAccComputeInstance_bootAndAttachedDisk_interface(instanceName2, diskName2, envvar.GetTestZoneFromEnv(), "n2-standard-8", "SCSI", true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance.foobar", "boot_disk.0.interface", "SCSI"), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "machine_type", "n2-standard-8"), + ), + }, + //computeInstanceImportStep("us-central1-a", instanceName2, []string{"desired_status","allow_stopping_for_update"}), + }, + }) +} + +func testAccComputeInstance_bootAndAttachedDisk_interface(instanceName, diskName, zone, machineType, bootDiskInterface string, allowStoppingForUpdate bool) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2204-lts" + project = "ubuntu-os-cloud" +} + +data "google_project" "project" {} + +resource 
"google_compute_disk" "foorbarattach" { + name = "%s" + size = 100 + type = "pd-balanced" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type= "%s" + zone = "%s" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + type = "pd-balanced" + size = 500 + } + interface = "%s" + + } + + attached_disk { + source = google_compute_disk.foorbarattach.self_link + } + + network_interface { + network = "default" + } + allow_stopping_for_update = %t + desired_status = "RUNNING" + +} +`, diskName, instanceName, machineType, zone, bootDiskInterface, allowStoppingForUpdate) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go index 30c7e2c11691..cfaaebaa5840 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go @@ -66,6 +66,8 @@ func TestAccComputeNetworkFirewallPolicyRule_update(t *testing.T) { } func TestAccComputeNetworkFirewallPolicyRule_multipleRules(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() context := map[string]interface{}{ diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl index ddd4243dd38c..5a2eed79d6b4 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl @@ -262,22 +262,22 @@ func TestAccComputeRegionBackendService_withBackendAndIAP(t *testing.T) { ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - { - Config: testAccComputeRegionBackendService_ilbBasicwithIAP(backendName, checkName), + { + Config: testAccComputeRegionBackendService_ilbBasic(backendName, checkName), }, { ResourceName: "google_compute_region_backend_service.foobar", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"iap.0.oauth2_client_secret"}, }, { - Config: testAccComputeRegionBackendService_ilbBasic(backendName, checkName), + Config: testAccComputeRegionBackendService_ilbBasicwithIAP(backendName, checkName), }, { ResourceName: "google_compute_region_backend_service.foobar", ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"iap.0.oauth2_client_secret"}, }, }, }) @@ -589,7 +589,8 @@ resource "google_compute_region_backend_service" "foobar" { health_checks = [google_compute_health_check.zero.self_link] region = "us-central1" - protocol = "%s" + protocol = "%s" + connection_draining_timeout_sec = 0 failover_policy { # Disable connection drain on failover cannot be set when the protocol is UDP drop_traffic_if_unhealthy = "%s" @@ -615,7 +616,8 @@ resource "google_compute_region_backend_service" "foobar" { health_checks = [google_compute_health_check.zero.self_link] region = "us-central1" - protocol = "%s" + protocol = "%s" + connection_draining_timeout_sec = 0 failover_policy { # Disable connection drain on failover cannot be set when the protocol is UDP drop_traffic_if_unhealthy = "%s" @@ -703,6 +705,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = google_compute_instance_group_manager.foobar.instance_group + balancing_mode = "CONNECTION" {{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} failover = true } @@ -772,6 +775,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = 
google_compute_instance_group_manager.foobar.instance_group + balancing_mode = "CONNECTION" {{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} failover = true } @@ -874,6 +878,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = google_compute_instance_group_manager.foobar.instance_group capacity_scaler = 1.0 + balancing_mode = "CONNECTION" } health_checks = [google_compute_health_check.default.self_link] @@ -1042,6 +1047,7 @@ resource "google_compute_region_backend_service" "foobar" { } iap { + enabled = true oauth2_client_id = "test" oauth2_client_secret = "test" } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl index 3439bb4abb7a..1c99f210953e 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl @@ -76,11 +76,11 @@ func ResourceComputeRegionInstanceTemplate() *schema.Resource { Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource - // uuid is 26 characters, limit the prefix to 37. + // uuid is 9 characters, limit the prefix to 54. value := v.(string) - if len(value) > 37 { + if len(value) > 54 { errors = append(errors, fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) + "%q cannot be longer than 54 characters, name is limited to 63", k)) } return }, @@ -858,6 +858,7 @@ be from 0 to 999,999,999 inclusive.`, Description: `Defines whether the instance should have confidential compute enabled. 
Field will be deprecated in a future release.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, + {{- if eq $.TargetVersionName "ga" }} "confidential_instance_type": { Type: schema.TypeString, Optional: true, @@ -868,6 +869,19 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, + {{- else }} + "confidential_instance_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: ` + The confidential computing technology the instance uses. + SEV is an AMD feature. TDX is an Intel feature. One of the following + values is required: SEV, SEV_SNP, TDX. If SEV_SNP, min_cpu_platform = + "AMD Milan" is currently required.`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + {{- end }} }, }, }, @@ -955,7 +969,7 @@ be from 0 to 999,999,999 inclusive.`, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, Description: `A set of key/value label pairs to assign to instances created from this template, - + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
Please refer to the field 'effective_labels' for all of the labels present on the resource.`, }, @@ -1129,7 +1143,12 @@ func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta in if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = id.PrefixedUniqueId(v.(string)) + prefix := v.(string) + if len(prefix) > 37 { + itName = tpgresource.ReducedPrefixedUniqueId(prefix) + } else { + itName = id.PrefixedUniqueId(prefix) + } } else { itName = id.UniqueId() } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl index d22cdd5968b0..06c9ada78409 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl @@ -714,6 +714,15 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeRegionInstanceTemplateConfidentialInstanceConfigEnableTdx(acctest.RandString(t, 10), "TDX"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar5", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, false, "TDX"), + ), + }, + {{- end }} }, }) } @@ -2973,6 +2982,42 @@ resource "google_compute_region_instance_template" "foobar4" { `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } +{{ if ne $.TargetVersionName `ga` -}} +func 
testAccComputeRegionInstanceTemplateConfidentialInstanceConfigEnableTdx(suffix string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image3" { + family = "ubuntu-2204-lts" + project = "tdx-guest-images" +} + +resource "google_compute_region_instance_template" "foobar5" { + name = "tf-test-instance-template-%s" + machine_type = "c3-standard-4" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image3.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix, confidentialInstanceType) +} +{{- end }} + func testAccComputeRegionInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3519,7 +3564,7 @@ resource "google_compute_region_instance_template" "foobar" { } partner_metadata = { - "test.compute.googleapis.com" = jsonencode({ + "test.compute.googleapis.com" = jsonencode({ entries = { key1 = "value1" key2 = 2 diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl new file mode 100644 index 000000000000..29631d2844ed --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go.tmpl @@ -0,0 +1,992 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +func TestAccComputeRegionTargetHttpsProxy_update(t *testing.T) { + t.Parallel() + + resourceSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_basic1(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_basic2(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_basic3(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_basic1(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + url_map = google_compute_region_url_map.foobar1.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar1.self_link] +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + 
http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id) +} + +func testAccComputeRegionTargetHttpsProxy_basic2(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + 
url_map = google_compute_region_url_map.foobar2.self_link + ssl_certificates = [ + google_compute_region_ssl_certificate.foobar1.self_link, + google_compute_region_ssl_certificate.foobar2.self_link, + ] +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = 
google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id) +} + +func testAccComputeRegionTargetHttpsProxy_basic3(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + url_map = google_compute_region_url_map.foobar2.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar2.self_link] + ssl_policy = google_compute_region_ssl_policy.foobar.self_link +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = 
"httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_policy" "foobar" { + name = "sslproxy-test-%s" + description = "my-description" + min_tls_version = "TLS_1_2" + profile = "MODERN" + region = "us-central1" +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id, id) +} + +func TestAccComputeRegionTargetHttpsProxy_addSslPolicy_withForwardingRule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + 
"resource_suffix": acctest.RandString(t, 10), + "project_id": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: 
https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] +} + +resource "google_compute_region_url_map" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region 
= "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + 
region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] + ssl_policy = google_compute_region_ssl_policy.default.id +} + +resource "google_compute_region_url_map" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_policy" "default" { + project = "%{project_id}" + region = "us-central1" + name = "ssl-policy-%{resource_suffix}" + + profile = "RESTRICTED" + min_tls_version = "TLS_1_2" +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = 
"tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccComputeRegionTargetHttpsProxy_addServerTlsPolicy_withForwardingRule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "resource_suffix": acctest.RandString(t, 10), + "project_id": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withoutServerTlsPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withServerTlsPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withoutServerTlsPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withoutServerTlsPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = 
google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] +} + +resource "google_compute_region_url_map" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = 
google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + provider = google-beta + + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + provider = google-beta + + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + provider = google-beta + + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withServerTlsPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` + +data "google_project" "project" { + provider = google-beta + project_id = "%{project_id}" +} + +resource "google_compute_forwarding_rule" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = 
google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # websocket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] + server_tls_policy = google_network_security_server_tls_policy.default.id +} + +resource "google_certificate_manager_trust_config" "default" { + provider = google-beta + + project = "%{project_id}" + location = "us-central1" + name = "trust-config-%{resource_suffix}" + + trust_stores { + 
trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } +} + +resource "google_network_security_server_tls_policy" "default" { + provider = google-beta + + project = "%{project_id}" + location = "us-central1" + name = "tls-policy-%{resource_suffix}" + allow_open = "false" + mtls_policy { + client_validation_mode = "REJECT_INVALID" + client_validation_trust_config = "projects/${data.google_project.project.number}/locations/us-central1/trustConfigs/${google_certificate_manager_trust_config.default.name}" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_region_url_map" "default-https" { + provider = google-beta + + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + provider = google-beta + + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + provider = google-beta + + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + provider = google-beta + + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + provider = google-beta + + name = 
"tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl index 220c69f01509..2f298fdbc6ec 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl @@ -188,21 +188,85 @@ func TestAccComputeSubnetwork_secondaryIpRanges(t *testing.T) { ), }, { - Config: testAccComputeSubnetwork_secondaryIpRanges_update4(cnName, subnetworkName), + Config: testAccComputeSubnetwork_secondaryIpRanges_update1(cnName, subnetworkName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), - testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), ), }, + }, + }) +} + +func TestAccComputeSubnetwork_secondaryIpRanges_sendEmpty(t *testing.T) { + t.Parallel() + + var subnetwork compute.Subnetwork + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + // Start without secondary_ip_range at all { - Config: 
testAccComputeSubnetwork_secondaryIpRanges_update1(cnName, subnetworkName), + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + ), + }, + // Add one secondary_ip_range + { + Config: testAccComputeSubnetwork_sendEmpty_single(cnName, subnetworkName, "true"), Check: resource.ComposeTestCheckFunc( testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + ), + }, + // Remove it with send_secondary_ip_range_if_empty = true + { + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + ), + }, + // Apply two secondary_ip_range + { + Config: testAccComputeSubnetwork_sendEmpty_double(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), + ), + }, + // Remove both with send_secondary_ip_range_if_empty = true + { + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, 
"google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), ), }, + // Apply one secondary_ip_range + { + Config: testAccComputeSubnetwork_sendEmpty_single(cnName, subnetworkName, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + ), + }, + // Check removing without send_secondary_ip_range_if_empty produces no diff (normal computed behavior) + { + Config: testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, "false"), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, }, }) } @@ -604,7 +668,7 @@ resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" `, cnName, subnetworkName) } -func testAccComputeSubnetwork_secondaryIpRanges_update4(cnName, subnetworkName string) string { +func testAccComputeSubnetwork_sendEmpty_removed(cnName, subnetworkName, sendEmpty string) string { return fmt.Sprintf(` resource "google_compute_network" "custom-test" { name = "%s" @@ -616,9 +680,59 @@ resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" ip_cidr_range = "10.2.0.0/16" region = "us-central1" network = google_compute_network.custom-test.self_link - secondary_ip_range = [] + send_secondary_ip_range_if_empty = "%s" } -`, cnName, subnetworkName) +`, cnName, subnetworkName, sendEmpty) +} + +func testAccComputeSubnetwork_sendEmpty_single(cnName, subnetworkName, sendEmpty string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + 
+resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range { + range_name = "tf-test-secondary-range-update2" + ip_cidr_range = "192.168.11.0/24" + } + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + send_secondary_ip_range_if_empty = "%s" +} +`, cnName, subnetworkName, sendEmpty) +} + +func testAccComputeSubnetwork_sendEmpty_double(cnName, subnetworkName, sendEmpty string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range { + range_name = "tf-test-secondary-range-update2" + ip_cidr_range = "192.168.11.0/24" + } + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + send_secondary_ip_range_if_empty = "%s" +} +`, cnName, subnetworkName, sendEmpty) } func testAccComputeSubnetwork_flowLogs(cnName, subnetworkName string) string { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl index 4d037cfe92f9..4a32212c9ff3 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl @@ -17,8 +17,10 @@ import ( ) const ( - canonicalSslCertificateTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s" - canonicalCertificateMapTemplate = 
"//certificatemanager.googleapis.com/projects/%s/locations/global/certificateMaps/%s" + canonicalSslCertificateTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s" + canonicalSslPolicyTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslPolicies/%s" + canonicalCertificateMapTemplate = "//certificatemanager.googleapis.com/projects/%s/locations/global/certificateMaps/%s" + canonicalServerTlsPolicyTemplate = "//networksecurity.googleapis.com/projects/%s/locations/global/serverTlsPolicies/%s" ) func TestAccComputeTargetHttpsProxy_update(t *testing.T) { @@ -39,9 +41,10 @@ func TestAccComputeTargetHttpsProxy_update(t *testing.T) { t, "google_compute_target_https_proxy.foobar", &proxy), testAccComputeTargetHttpsProxyDescription("Resource created for Terraform acceptance testing", &proxy), testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert1-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasServerTlsPolicy(t, "tf-test-server-tls-policy-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasSslPolicy(t, "tf-test-httpsproxy-sslpolicy-"+resourceSuffix, &proxy), ), }, - { Config: testAccComputeTargetHttpsProxy_basic2(resourceSuffix), Check: resource.ComposeTestCheckFunc( @@ -50,6 +53,8 @@ func TestAccComputeTargetHttpsProxy_update(t *testing.T) { testAccComputeTargetHttpsProxyDescription("Resource created for Terraform acceptance testing", &proxy), testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert1-"+resourceSuffix, &proxy), testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert2-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasSslPolicy(t, "tf-test-httpsproxy-sslpolicy2-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t, &proxy), ), }, }, @@ -80,6 +85,45 @@ func TestAccComputeTargetHttpsProxy_certificateMap(t *testing.T) { }) } +func TestAccComputeTargetHttpsProxyServerTlsPolicy_update(t 
*testing.T) { + t.Parallel() + + var proxy compute.TargetHttpsProxy + resourceSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeTargetHttpsProxyWithoutServerTlsPolicy(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t, &proxy), + ), + }, + { + Config: testAccComputeTargetHttpsProxyWithServerTlsPolicy(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyHasServerTlsPolicy(t, "tf-test-server-tls-policy-"+resourceSuffix, &proxy), + ), + }, + { + Config: testAccComputeTargetHttpsProxyWithoutServerTlsPolicy(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t, &proxy), + ), + }, + }, + }) +} + func testAccCheckComputeTargetHttpsProxyExists(t *testing.T, n string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -115,6 +159,7 @@ func testAccComputeTargetHttpsProxyDescription(description string, proxy *comput if proxy.Description != description { return fmt.Errorf("Wrong description: expected '%s' got '%s'", description, proxy.Description) } + return nil } } @@ -130,7 +175,43 @@ func testAccComputeTargetHttpsProxyHasSslCertificate(t *testing.T, cert string, } } - return fmt.Errorf("Ssl certificate not found: expected'%s'", certUrl) + return fmt.Errorf("Ssl 
certificate not found: expected '%s'", certUrl) + } +} + +func testAccComputeTargetHttpsProxyHasSslPolicy(t *testing.T, sslPolicy string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + sslPolicyUrl := fmt.Sprintf(canonicalSslPolicyTemplate, config.Project, sslPolicy) + + if tpgresource.ConvertSelfLinkToV1(proxy.SslPolicy) == sslPolicyUrl { + return nil + } + + return fmt.Errorf("Ssl Policy not found: expected '%s'", sslPolicyUrl) + } +} + +func testAccComputeTargetHttpsProxyHasServerTlsPolicy(t *testing.T, policy string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + serverTlsPolicyUrl := fmt.Sprintf(canonicalServerTlsPolicyTemplate, config.Project, policy) + + if tpgresource.ConvertSelfLinkToV1(proxy.ServerTlsPolicy) == serverTlsPolicyUrl { + return nil + } + + return fmt.Errorf("Server Tls Policy not found: expected '%s'", serverTlsPolicyUrl) + } +} + +func testAccComputeTargetHttpsProxyHasNullServerTlsPolicy(t *testing.T, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + if proxy.ServerTlsPolicy != "" { + return fmt.Errorf("Server Tls Policy found: expected 'null'") + } + + return nil } } @@ -143,18 +224,21 @@ func testAccComputeTargetHttpsProxyHasCertificateMap(t *testing.T, certificateMa return nil } - return fmt.Errorf("certificate map not found: expected'%s'", certificateMapUrl) + return fmt.Errorf("certificate map not found: expected '%s'", certificateMapUrl) } } func testAccComputeTargetHttpsProxy_basic1(id string) string { return fmt.Sprintf(` +data "google_project" "project" {} + resource "google_compute_target_https_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "tf-test-httpsproxy-%s" - url_map = google_compute_url_map.foobar.self_link - ssl_certificates = 
[google_compute_ssl_certificate.foobar1.self_link] - ssl_policy = google_compute_ssl_policy.foobar.self_link + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [google_compute_ssl_certificate.foobar1.self_link] + ssl_policy = google_compute_ssl_policy.foobar.self_link + server_tls_policy = google_network_security_server_tls_policy.server_tls_policy.id } resource "google_compute_backend_service" "foobar" { @@ -192,7 +276,7 @@ resource "google_compute_url_map" "foobar" { } resource "google_compute_ssl_policy" "foobar" { - name = "tf-test-sslproxy-%s" + name = "tf-test-httpsproxy-sslpolicy-%s" description = "my-description" min_tls_version = "TLS_1_2" profile = "MODERN" @@ -211,7 +295,25 @@ resource "google_compute_ssl_certificate" "foobar2" { private_key = file("test-fixtures/test.key") certificate = file("test-fixtures/test.crt") } -`, id, id, id, id, id, id, id) + +resource "google_certificate_manager_trust_config" "trust_config" { + name = "tf-test-trust-config-%s" + location = "global" + + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } +} + +resource "google_network_security_server_tls_policy" "server_tls_policy" { + name = "tf-test-server-tls-policy-%s" + + mtls_policy { + client_validation_trust_config = "projects/${data.google_project.project.number}/locations/global/trustConfigs/${google_certificate_manager_trust_config.trust_config.name}" + client_validation_mode = "ALLOW_INVALID_OR_MISSING_CLIENT_CERT" + } +} +`, id, id, id, id, id, id, id, id, id) } func testAccComputeTargetHttpsProxy_basic2(id string) string { @@ -224,8 +326,10 @@ resource "google_compute_target_https_proxy" "foobar" { google_compute_ssl_certificate.foobar1.self_link, google_compute_ssl_certificate.foobar2.self_link, ] - quic_override = "ENABLE" - tls_early_data = "STRICT" + ssl_policy = google_compute_ssl_policy.foobar2.self_link + 
quic_override = "ENABLE" + tls_early_data = "STRICT" + server_tls_policy = null } resource "google_compute_backend_service" "foobar" { @@ -262,8 +366,8 @@ resource "google_compute_url_map" "foobar" { } } -resource "google_compute_ssl_policy" "foobar" { - name = "tf-test-sslproxy-%s" +resource "google_compute_ssl_policy" "foobar2" { + name = "tf-test-httpsproxy-sslpolicy2-%s" description = "my-description" min_tls_version = "TLS_1_2" profile = "MODERN" @@ -288,9 +392,9 @@ resource "google_compute_ssl_certificate" "foobar2" { func testAccComputeTargetHttpsProxy_certificateMap(id string) string { return fmt.Sprintf(` resource "google_compute_target_https_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "tf-test-httpsproxy-%s" - url_map = google_compute_url_map.foobar.self_link + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link certificate_map = "//certificatemanager.googleapis.com/${google_certificate_manager_certificate_map.map.id}" } @@ -339,6 +443,102 @@ resource "google_certificate_manager_dns_authorization" "instance" { name = "tf-test-dnsauthz-%s" domain = "mysite.com" } - `, id, id, id, id, id, id, id, id) } + +func testAccComputeTargetHttpsProxyWithoutServerTlsPolicy(id string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [google_compute_ssl_certificate.foobar.self_link] +} + +resource "google_compute_backend_service" "foobar" { + name = "tf-test-httpsproxy-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-httpsproxy-check-%s" + request_path = "/" + 
check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "tf-test-httpsproxy-urlmap-%s" + default_service = google_compute_backend_service.foobar.self_link +} + +resource "google_compute_ssl_certificate" "foobar" { + name = "tf-test-httpsproxy-cert-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id) +} + +func testAccComputeTargetHttpsProxyWithServerTlsPolicy(id string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [google_compute_ssl_certificate.foobar.self_link] + server_tls_policy = google_network_security_server_tls_policy.server_tls_policy.id +} + +resource "google_compute_backend_service" "foobar" { + name = "tf-test-httpsproxy-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-httpsproxy-check-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "tf-test-httpsproxy-urlmap-%s" + default_service = google_compute_backend_service.foobar.self_link +} + +resource "google_compute_ssl_certificate" "foobar" { + name = "tf-test-httpsproxy-cert-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_certificate_manager_trust_config" "trust_config" { + name = "tf-test-trust-config-%s" + location = "global" + + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } +} + +resource "google_network_security_server_tls_policy" "server_tls_policy" { + name = 
"tf-test-server-tls-policy-%s" + + mtls_policy { + client_validation_trust_config = "projects/${data.google_project.project.number}/locations/global/trustConfigs/${google_certificate_manager_trust_config.trust_config.name}" + client_validation_mode = "ALLOW_INVALID_OR_MISSING_CLIENT_CERT" + } + + lifecycle { + create_before_destroy = true + } +} +`, id, id, id, id, id, id, id) +} diff --git a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl index 08a6419af17c..c5e1425a1f0a 100644 --- a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl @@ -77,6 +77,18 @@ func schemaContainerdConfig() *schema.Schema { } } +// Note: this is a bool internally, but implementing as an enum internally to +// make it easier to accept API level defaults. +func schemaInsecureKubeletReadonlyPortEnabled() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. 
Possible values: `TRUE`, `FALSE`.", + ValidateFunc: validation.StringInSlice([]string{"FALSE","TRUE"}, false), + } +} + func schemaLoggingVariant() *schema.Schema { return &schema.Schema{ Type: schema.TypeString, @@ -138,9 +150,6 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Computed: true, ForceNew: true, - // Legacy config mode allows removing GPU's from an existing resource - // See https://www.terraform.io/docs/configuration/attr-as-blocks.html - ConfigMode: schema.SchemaConfigModeAttr, Description: `List of the type and count of accelerator cards attached to the instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -163,7 +172,6 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Computed: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, Description: `Configuration for auto installation of GPU driver.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -188,7 +196,6 @@ func schemaNodeConfig() *schema.Schema { MaxItems: 1, Optional: true, ForceNew: true, - ConfigMode: schema.SchemaConfigModeAttr, Description: `Configuration for GPU sharing.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -601,6 +608,7 @@ func schemaNodeConfig() *schema.Schema { Optional: true, Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`, }, + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), "pod_pids_limit": { Type: schema.TypeInt, Optional: true, @@ -772,8 +780,24 @@ func schemaNodeConfig() *schema.Schema { } } +// Separate since this currently only supports a single value -- a subset of +// the overall NodeKubeletConfig +func schemaNodePoolAutoConfigNodeKubeletConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Node kubelet configs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "insecure_kubelet_readonly_port_enabled": 
schemaInsecureKubeletReadonlyPortEnabled(), + }, + }, + } +} + func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { - configs := configured.([]interface{}) + configs := configured.([]interface{}) if len(configs) == 0 || configs[0] == nil { return nil } @@ -781,6 +805,12 @@ func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefau nodeConfigDefaults := &container.NodeConfigDefaults{} nodeConfigDefaults.ContainerdConfig = expandContainerdConfig(config["containerd_config"]) + if v, ok := config["insecure_kubelet_readonly_port_enabled"]; ok { + nodeConfigDefaults.NodeKubeletConfig = &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(v), + ForceSendFields: []string{"InsecureKubeletReadonlyPortEnabled"}, + } + } if variant, ok := config["logging_variant"]; ok { nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{ VariantConfig: &container.LoggingVariantConfig{ @@ -789,14 +819,14 @@ func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefau } } {{- if ne $.TargetVersionName "ga" }} - if v, ok := config["gcfs_config"]; ok && len(v.([]interface{})) > 0 { - gcfsConfig := v.([]interface{})[0].(map[string]interface{}) + if v, ok := config["gcfs_config"]; ok && len(v.([]interface{})) > 0 { + gcfsConfig := v.([]interface{})[0].(map[string]interface{}) nodeConfigDefaults.GcfsConfig = &container.GcfsConfig{ Enabled: gcfsConfig["enabled"].(bool), } } {{- end }} - return nodeConfigDefaults + return nodeConfigDefaults } func expandNodeConfig(v interface{}) *container.NodeConfig { @@ -1138,6 +1168,13 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConf return wmc } +func expandInsecureKubeletReadonlyPortEnabled(v interface{}) bool { + if v == "TRUE" { + return true + } + return false +} + func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { if v == nil { return nil @@ -1158,6 +1195,10 @@ 
func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok { kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string) } + if insecureKubeletReadonlyPortEnabled, ok := cfg["insecure_kubelet_readonly_port_enabled"]; ok { + kConfig.InsecureKubeletReadonlyPortEnabled = expandInsecureKubeletReadonlyPortEnabled(insecureKubeletReadonlyPortEnabled) + kConfig.ForceSendFields = append(kConfig.ForceSendFields, "InsecureKubeletReadonlyPortEnabled") + } if podPidsLimit, ok := cfg["pod_pids_limit"]; ok { kConfig.PodPidsLimit = int64(podPidsLimit.(int)) } @@ -1366,6 +1407,8 @@ func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]int result[0]["containerd_config"] = flattenContainerdConfig(c.ContainerdConfig) + result[0]["insecure_kubelet_readonly_port_enabled"] = flattenInsecureKubeletReadonlyPortEnabled(c.NodeKubeletConfig) + result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig) {{ if ne $.TargetVersionName `ga` -}} @@ -1557,6 +1600,14 @@ func flattenSecondaryBootDisks(c []*container.SecondaryBootDisk) []map[string]in return result } +func flattenInsecureKubeletReadonlyPortEnabled(c *container.NodeKubeletConfig) string { + // Convert bool from the API to the enum values used internally + if c != nil && c.InsecureKubeletReadonlyPortEnabled { + return "TRUE" + } + return "FALSE" +} + func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string { variant := "DEFAULT" if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" { @@ -1706,10 +1757,21 @@ func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface result := []map[string]interface{}{} if c != nil { result = append(result, map[string]interface{}{ - "cpu_cfs_quota": c.CpuCfsQuota, - "cpu_cfs_quota_period": c.CpuCfsQuotaPeriod, - "cpu_manager_policy": c.CpuManagerPolicy, - "pod_pids_limit": c.PodPidsLimit, + "cpu_cfs_quota": c.CpuCfsQuota, + "cpu_cfs_quota_period": 
c.CpuCfsQuotaPeriod, + "cpu_manager_policy": c.CpuManagerPolicy, + "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c), + "pod_pids_limit": c.PodPidsLimit, + }) + } + return result +} + +func flattenNodePoolAutoConfigNodeKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "insecure_kubelet_readonly_port_enabled": flattenInsecureKubeletReadonlyPortEnabled(c), }) } return result diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl index 84078431c4e5..9707d9febc5b 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl @@ -161,6 +161,7 @@ func clusterSchemaNodePoolDefaults() *schema.Schema { {{- if ne $.TargetVersionName "ga" }} "gcfs_config": schemaGcfsConfig(false), {{- end }} + "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), "logging_variant": schemaLoggingVariant(), }, }, @@ -217,6 +218,7 @@ func ResourceContainerCluster() *schema.Resource { containerClusterSurgeSettingsCustomizeDiff, containerClusterEnableK8sBetaApisCustomizeDiff, containerClusterNodeVersionCustomizeDiff, + tpgresource.SetDiffForLabelsWithCustomizedName("resource_labels"), ), Timeouts: &schema.ResourceTimeout{ @@ -1289,20 +1291,9 @@ func ResourceContainerCluster() *schema.Resource { Description: `Whether or not the advanced datapath metrics are enabled.`, }, "enable_relay": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether or not Relay is enabled.`, - Default: false, - ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.relay_mode"}, - }, - "relay_mode": { - Type: schema.TypeString, - 
Optional: true, - Computed: true, - Deprecated: "Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field.", - Description: `Mode used to make Relay available.`, - ValidateFunc: validation.StringInSlice([]string{"DISABLED", "INTERNAL_VPC_LB", "EXTERNAL_LB"}, false), - ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.enable_relay"}, + Type: schema.TypeBool, + Required: true, + Description: `Whether or not Relay is enabled.`, }, }, }, @@ -1513,6 +1504,7 @@ func ResourceContainerCluster() *schema.Resource { Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "node_kubelet_config": schemaNodePoolAutoConfigNodeKubeletConfig(), "network_tags": { Type: schema.TypeList, Optional: true, @@ -1563,7 +1555,6 @@ func ResourceContainerCluster() *schema.Resource { }, }, {{- end }} -{{- if ne $.TargetVersionName "ga" }} "secret_manager_config": { Type: schema.TypeList, Optional: true, @@ -1580,7 +1571,6 @@ func ResourceContainerCluster() *schema.Resource { }, }, }, -{{- end }} "project": { Type: schema.TypeString, @@ -1821,7 +1811,22 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster.`, + Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+ Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, "label_fingerprint": { @@ -1960,12 +1965,13 @@ func ResourceContainerCluster() *schema.Resource { "channel": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE"}, false), + ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE", "EXTENDED"}, false), Description: `The selected release channel. Accepted values are: * UNSPECIFIED: Not set. * RAPID: Weekly upgrade cadence; Early testers and developers who requires new features. * REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. -* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky.`, +* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky. +* EXTENDED: GKE provides extended support for Kubernetes minor versions through the Extended channel. 
With this channel, you can stay on a minor version for up to 24 months.`, }, }, }, @@ -2235,12 +2241,6 @@ func ResourceContainerCluster() *schema.Resource { // One quirk with this approach is that configs with mixed count=0 and count>0 accelerator blocks will // show a confusing diff if one of there are config changes that result in a legitimate diff as the count=0 // blocks will not be in state. -// -// This could also be modelled by setting `guest_accelerator = []` in the config. However since the -// previous syntax requires that schema.SchemaConfigModeAttr is set on the field it is advisable that -// we have a work around for removing guest accelerators. Also Terraform 0.11 cannot use dynamic blocks -// so this isn't a solution for module authors who want to dynamically omit guest accelerators -// See https://github.com/hashicorp/terraform-provider-google/issues/3786 func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { old, new := diff.GetChange("node_config.0.guest_accelerator") oList := old.([]interface{}) @@ -2352,9 +2352,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er {{- if ne $.TargetVersionName "ga" }} PodSecurityPolicyConfig: expandPodSecurityPolicyConfig(d.Get("pod_security_policy_config")), {{- end }} -{{- if ne $.TargetVersionName "ga" }} - SecretManagerConfig: expandSecretManagerConfig(d.Get("secret_manager_config")), -{{- end }} + SecretManagerConfig: expandSecretManagerConfig(d.Get("secret_manager_config")), Autoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), BinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization")), Autopilot: &container.Autopilot{ @@ -2384,7 +2382,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er MasterAuth: expandMasterAuth(d.Get("master_auth")), NotificationConfig: expandNotificationConfig(d.Get("notification_config")), ConfidentialNodes: 
expandConfidentialNodes(d.Get("confidential_nodes")), - ResourceLabels: tpgresource.ExpandStringMap(d, "resource_labels"), + ResourceLabels: tpgresource.ExpandStringMap(d, "effective_labels"), NodePoolAutoConfig: expandNodePoolAutoConfig(d.Get("node_pool_auto_config")), {{- if ne $.TargetVersionName "ga" }} ProtectConfig: expandProtectConfig(d.Get("protect_config")), @@ -2996,14 +2994,20 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("cluster_telemetry", flattenClusterTelemetry(cluster.ClusterTelemetry)); err != nil { return err } +{{- end }} if err := d.Set("secret_manager_config", flattenSecretManagerConfig(cluster.SecretManagerConfig)); err != nil { return err } -{{- end }} - if err := d.Set("resource_labels", cluster.ResourceLabels); err != nil { - return fmt.Errorf("Error setting resource_labels: %s", err) + if err := tpgresource.SetLabels(cluster.ResourceLabels, d, "resource_labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(cluster.ResourceLabels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", cluster.ResourceLabels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) } if err := d.Set("label_fingerprint", cluster.LabelFingerprint); err != nil { return fmt.Errorf("Error setting label_fingerprint: %s", err) @@ -3824,6 +3828,60 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s: image type has been updated to %s", d.Id(), it) } + + if d.HasChange("node_config.0.kubelet_config") { + + defaultPool := "default-pool" + + timeout := d.Timeout(schema.TimeoutCreate) + + nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) + if err != nil { + return err + } + + // Acquire write-lock on nodepool. 
+ npLockKey := nodePoolInfo.nodePoolLockKey(defaultPool) + + // Note: probably long term this should be handled broadly for all the + // items in kubelet_config in a simpler / DRYer way. + // See b/361634104 + if d.HasChange("node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled") { + it := d.Get("node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled").(string) + + // While we're getting the value from the drepcated field in + // node_config.kubelet_config, the actual setting that needs to be updated + // is on the default nodepool. + req := &container.UpdateNodePoolRequest{ + Name: defaultPool, + KubeletConfig: &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(it), + ForceSendFields: []string{"InsecureKubeletReadonlyPortEnabled"}, + }, + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(defaultPool), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, + "updating GKE node pool insecure_kubelet_readonly_port_enabled", userAgent, timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s: default-pool setting for insecure_kubelet_readonly_port_enabled updated to %s", d.Id(), it) + } + } } if d.HasChange("notification_config") { @@ -3986,7 +4044,6 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } {{- end }} -{{ if ne $.TargetVersionName `ga` -}} if d.HasChange("secret_manager_config") { c := d.Get("secret_manager_config") req := 
&container.UpdateClusterRequest{ @@ -4013,7 +4070,6 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } log.Printf("[INFO] GKE cluster %s secret manager csi add-on has been updated", d.Id()) } -{{- end }} if d.HasChange("workload_identity_config") { // Because GKE uses a non-RESTful update function, when removing the @@ -4095,8 +4151,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s monitoring config has been updated", d.Id()) } - if d.HasChange("resource_labels") { - resourceLabels := d.Get("resource_labels").(map[string]interface{}) + if d.HasChange("effective_labels") { + resourceLabels := d.Get("effective_labels").(map[string]interface{}) labelFingerprint := d.Get("label_fingerprint").(string) req := &container.SetLabelsRequest{ ResourceLabels: tpgresource.ConvertStringMap(resourceLabels), @@ -4250,6 +4306,28 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.insecure_kubelet_readonly_port_enabled") { + if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.insecure_kubelet_readonly_port_enabled"); ok { + insecureKubeletReadonlyPortEnabled := v.(string) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodeKubeletConfig: &container.NodeKubeletConfig{ + InsecureKubeletReadonlyPortEnabled: expandInsecureKubeletReadonlyPortEnabled(insecureKubeletReadonlyPortEnabled), + ForceSendFields: []string{"InsecureKubeletReadonlyPortEnabled"}, + }, + }, + } + + updateF := updateFunc(req, "updating GKE cluster desired node pool insecure kubelet readonly port configuration defaults.") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool insecure_kubelet_readonly_port_enabled default has been updated", d.Id()) + } + } + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") { if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok { loggingVariant := v.(string) @@ -4325,6 +4403,24 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } } + if d.HasChange("node_pool_auto_config.0.node_kubelet_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigKubeletConfig: expandKubeletConfig( + d.Get("node_pool_auto_config.0.node_kubelet_config"), + ), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node pool auto config node_kubelet_config parameters") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool auto config node_kubelet_config parameters have been updated", d.Id()) + } + if d.HasChange("node_pool_auto_config.0.network_tags.0.tags") { tags := d.Get("node_pool_auto_config.0.network_tags.0.tags").([]interface{}) @@ -4876,6 +4972,7 @@ func expandClusterAutoscaling(configured interface{}, d *schema.ResourceData) *c ResourceLimits: resourceLimits, AutoscalingProfile: config["autoscaling_profile"].(string), AutoprovisioningNodePoolDefaults: expandAutoProvisioningDefaults(config["auto_provisioning_defaults"], d), + AutoprovisioningLocations: tpgresource.ConvertStringArr(config["auto_provisioning_locations"].([]interface{})), } } @@ -5410,7 +5507,6 @@ func expandPodSecurityPolicyConfig(configured interface{}) *container.PodSecurit } {{- end }} -{{ if ne $.TargetVersionName `ga` -}} func expandSecretManagerConfig(configured interface{}) *container.SecretManagerConfig { l := 
configured.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -5423,7 +5519,6 @@ func expandSecretManagerConfig(configured interface{}) *container.SecretManagerC ForceSendFields: []string{"Enabled"}, } } -{{- end }} func expandDefaultMaxPodsConstraint(v interface{}) *container.MaxPodsConstraint { if v == nil { @@ -5581,21 +5676,10 @@ func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig if v, ok := config["advanced_datapath_observability_config"]; ok && len(v.([]interface{})) > 0 { advanced_datapath_observability_config := v.([]interface{})[0].(map[string]interface{}) - mc.AdvancedDatapathObservabilityConfig = &container.AdvancedDatapathObservabilityConfig{ EnableMetrics: advanced_datapath_observability_config["enable_metrics"].(bool), - } - - enable_relay := advanced_datapath_observability_config["enable_relay"].(bool) - relay_mode := advanced_datapath_observability_config["relay_mode"].(string) - if enable_relay { - mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay - } else if relay_mode == "INTERNAL_VPC_LB" || relay_mode == "EXTERNAL_LB" { - mc.AdvancedDatapathObservabilityConfig.RelayMode = relay_mode - } else { - mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay - mc.AdvancedDatapathObservabilityConfig.RelayMode = "DISABLED" - mc.AdvancedDatapathObservabilityConfig.ForceSendFields = []string{"EnableRelay"} + EnableRelay: advanced_datapath_observability_config["enable_relay"].(bool), + ForceSendFields: []string{"EnableRelay"}, } } @@ -5671,6 +5755,10 @@ func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoCon npac := &container.NodePoolAutoConfig{} config := l[0].(map[string]interface{}) + if v, ok := config["node_kubelet_config"]; ok { + npac.NodeKubeletConfig = expandKubeletConfig(v) + } + if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 { npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v) } @@ -6187,6 +6275,7 @@ func flattenClusterAutoscaling(a 
*container.ClusterAutoscaling) []map[string]int r["resource_limits"] = resourceLimits r["enabled"] = true r["auto_provisioning_defaults"] = flattenAutoProvisioningDefaults(a.AutoprovisioningNodePoolDefaults) + r["auto_provisioning_locations"] = a.AutoprovisioningLocations } else { r["enabled"] = false } @@ -6308,7 +6397,6 @@ func flattenPodSecurityPolicyConfig(c *container.PodSecurityPolicyConfig) []map[ {{ end }} -{{ if ne $.TargetVersionName `ga` -}} func flattenSecretManagerConfig(c *container.SecretManagerConfig) []map[string]interface{} { if c == nil { return []map[string]interface{}{ @@ -6324,7 +6412,6 @@ func flattenSecretManagerConfig(c *container.SecretManagerConfig) []map[string]i } } -{{ end }} func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) []map[string]interface{} { if c == nil { @@ -6488,29 +6575,10 @@ func flattenAdvancedDatapathObservabilityConfig(c *container.AdvancedDatapathObs return nil } - if c.EnableRelay { - return []map[string]interface{}{ - { - "enable_metrics": c.EnableMetrics, - "enable_relay": c.EnableRelay, - }, - } - } - - if c.RelayMode == "INTERNAL_VPC_LB" || c.RelayMode == "EXTERNAL_LB" { - return []map[string]interface{}{ - { - "enable_metrics": c.EnableMetrics, - "relay_mode": c.RelayMode, - }, - } - } - return []map[string]interface{}{ { "enable_metrics": c.EnableMetrics, - "enable_relay": false, - "relay_mode": "DISABLED", + "enable_relay": c.EnableRelay, }, } } @@ -6529,6 +6597,9 @@ func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]int } result := make(map[string]interface{}) + if c.NodeKubeletConfig != nil { + result["node_kubelet_config"] = flattenNodePoolAutoConfigNodeKubeletConfig(c.NodeKubeletConfig) + } if c.NetworkTags != nil { result["network_tags"] = flattenNodePoolAutoConfigNetworkTags(c.NetworkTags) } @@ -6773,7 +6844,6 @@ func podSecurityPolicyCfgSuppress(k, old, new string, r *schema.ResourceData) bo } {{- end }} -{{ if ne $.TargetVersionName `ga` -}} func 
SecretManagerCfgSuppress(k, old, new string, r *schema.ResourceData) bool { if k == "secret_manager_config.#" && old == "1" && new == "0" { if v, ok := r.GetOk("secret_manager_config"); ok { @@ -6787,7 +6857,6 @@ func SecretManagerCfgSuppress(k, old, new string, r *schema.ResourceData) bool { } return false } -{{- end }} func containerClusterNetworkPolicyDiffSuppress(k, old, new string, r *schema.ResourceData) bool { // if network_policy configuration is empty, we store it as populated and enabled=false, and diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl index e823197d5234..52311225f650 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl @@ -559,6 +559,13 @@ func resourceContainerClusterResourceV1() *schema.Resource { }, }, }, + "auto_provisioning_locations": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of Google Compute Engine zones in which the NodePool's nodes can be created by NAP.`, + }, {{- if ne $.TargetVersionName "ga" }} "autoscaling_profile": { Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl index c630cfe47a4b..b46574d58170 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl @@ -7,6 +7,7 @@ import ( "regexp" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" 
"github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -917,6 +918,26 @@ func TestAccContainerCluster_withReleaseChannelEnabledDefaultVersion(t *testing. ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, + { + Config: testAccContainerCluster_withReleaseChannelEnabledDefaultVersion(clusterName, "EXTENDED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "EXTENDED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, { Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "UNSPECIFIED", networkName, subnetworkName), }, @@ -946,7 +967,7 @@ func TestAccContainerCluster_withInvalidReleaseChannel(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "CANARY", networkName, subnetworkName), - ExpectError: regexp.MustCompile(`expected release_channel\.0\.channel to be one of \["?UNSPECIFIED"? "?RAPID"? "?REGULAR"? "?STABLE"?\], got CANARY`), + ExpectError: regexp.MustCompile(`expected release_channel\.0\.channel to be one of \["?UNSPECIFIED"? "?RAPID"? "?REGULAR"? "?STABLE"? 
"?EXTENDED"?\], got CANARY`), }, }, }) @@ -1514,6 +1535,146 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }) } +// This is for node_config.kubelet_config, which affects the default node-pool +// (default-pool) when created via the google_container_cluster resource +func TestAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfigUpdates(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfig(clusterName, networkName, subnetworkName, "TRUE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_in_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfig(clusterName, networkName, subnetworkName, "FALSE"), + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_in_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodePool(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", 
acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodePool(clusterName, nodePoolName, networkName, subnetworkName, "TRUE"), + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_in_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +// This is for `node_pool_defaults.node_config_defaults` - the default settings +// for newly created nodepools +func TestAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdates(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + // Test API default (no value set in config) first + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdateBaseline(clusterName, networkName, subnetworkName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, "TRUE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, "FALSE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, "TRUE"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_insecure_kubelet_readonly_port_enabled_node_pool_update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + + func TestAccContainerCluster_withLoggingVariantInNodeConfig(t *testing.T) { t.Parallel() clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) @@ -3086,6 +3247,52 @@ func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) { }) } +func TestAccContainerCluster_withAutopilotKubeletConfig(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", 
randomSuffix) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilotKubeletConfigBaseline(clusterName), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotKubeletConfigUpdates(clusterName, "FALSE"), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotKubeletConfigUpdates(clusterName, "TRUE"), + }, + { + ResourceName: "google_container_cluster.with_autopilot_kubelet_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + + func TestAccContainerCluster_withAutopilotResourceManagerTags(t *testing.T) { t.Parallel() @@ -3261,7 +3468,6 @@ func TestAccContainerCluster_withIdentityServiceConfig(t *testing.T) { }) } -{{ if ne $.TargetVersionName `ga` -}} func TestAccContainerCluster_withSecretManagerConfig(t *testing.T) { t.Parallel() @@ -3312,7 +3518,6 @@ func TestAccContainerCluster_withSecretManagerConfig(t *testing.T) { }, }) } -{{- end }} func TestAccContainerCluster_withLoggingConfig(t *testing.T) { t.Parallel() @@ -3401,24 +3606,6 @@ func TestAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityCo ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, 
- { - Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(clusterName), - }, - { - ResourceName: "google_container_cluster.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, - }, - { - Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(clusterName), - }, - { - ResourceName: "google_container_cluster.primary", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, - }, }, }) } @@ -3957,6 +4144,60 @@ func TestAccContainerCluster_autoprovisioningDefaultsManagement(t *testing.T) { }) } +func TestAccContainerCluster_autoprovisioningLocations(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningLocations(clusterName, networkName, subnetworkName, []string{"us-central1-a", "us-central1-f"}), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.enabled", "true"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.0", "us-central1-a"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.1", "us-central1-f"), + ), + }, + { + 
ResourceName: "google_container_cluster.with_autoprovisioning_locations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningLocations(clusterName, networkName, subnetworkName, []string{"us-central1-b", "us-central1-c"}), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.enabled", "true"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.0", "us-central1-b"), + + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning_locations", + "cluster_autoscaling.0.auto_provisioning_locations.1", "us-central1-c"), + ), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning_locations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + // This resource originally cleaned up the dangling cluster directly, but now // taints it, having Terraform clean it up during the next apply. This test // name is now inexact, but is being preserved to maintain the test history. 
@@ -6417,26 +6658,31 @@ resource "google_container_cluster" "with_node_config" { `, clusterName, networkName, subnetworkName) } -func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant, networkName, subnetworkName string) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodeConfig(clusterName, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_logging_variant_in_node_config" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_in_node_config" { name = "%s" location = "us-central1-f" initial_node_count = 1 node_config { - logging_variant = "%s" + kubelet_config { + # Must be set when kubelet_config is, but causes permadrift unless set to + # undocumented empty value + cpu_manager_policy = "" + insecure_kubelet_readonly_port_enabled = "%s" + } } deletion_protection = false network = "%s" subnetwork = "%s" } -`, clusterName, loggingVariant, networkName, subnetworkName) +`, clusterName, insecureKubeletReadonlyPortEnabled, networkName, subnetworkName) } -func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant, networkName, subnetworkName string) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledInNodePool(clusterName, nodePoolName, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_logging_variant_in_node_pool" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_in_node_pool" { name = "%s" location = "us-central1-f" @@ -6444,74 +6690,147 @@ resource "google_container_cluster" "with_logging_variant_in_node_pool" { name = "%s" initial_node_count = 1 node_config { - logging_variant = "%s" + kubelet_config { + cpu_manager_policy = "static" + insecure_kubelet_readonly_port_enabled = "%s" + } } } 
deletion_protection = false network = "%s" subnetwork = "%s" } -`, clusterName, nodePoolName, loggingVariant, networkName, subnetworkName) +`, clusterName, nodePoolName, insecureKubeletReadonlyPortEnabled, networkName, subnetworkName) } -func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant, networkName, subnetworkName string) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdateBaseline(clusterName, networkName, subnetworkName string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_logging_variant_node_pool_default" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_node_pool_update" { name = "%s" location = "us-central1-f" initial_node_count = 1 - node_pool_defaults { - node_config_defaults { - logging_variant = "%s" - } - } deletion_protection = false network = "%s" subnetwork = "%s" } -`, clusterName, loggingVariant, networkName, subnetworkName) +`, clusterName, networkName, subnetworkName) } -func testAccContainerCluster_withAdvancedMachineFeaturesInNodePool(clusterName, nodePoolName, networkName, subnetworkName string, nvEnabled bool) string { +func testAccContainerCluster_withInsecureKubeletReadonlyPortEnabledDefaultsUpdate(clusterName, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string) string { return fmt.Sprintf(` -resource "google_container_cluster" "with_advanced_machine_features_in_node_pool" { +resource "google_container_cluster" "with_insecure_kubelet_readonly_port_enabled_node_pool_update" { name = "%s" location = "us-central1-f" + initial_node_count = 1 - node_pool { - name = "%s" - initial_node_count = 1 - node_config { - machine_type = "c2-standard-4" - advanced_machine_features { - threads_per_core = 1 - enable_nested_virtualization = "%t" - } + node_pool_defaults { + node_config_defaults { + insecure_kubelet_readonly_port_enabled = "%s" } } deletion_protection = false network = "%s" subnetwork 
= "%s" } -`, clusterName, nodePoolName, nvEnabled, networkName, subnetworkName) +`, clusterName, insecureKubeletReadonlyPortEnabled, networkName, subnetworkName) } -{{ if ne $.TargetVersionName `ga` -}} -func testAccContainerCluster_withNodePoolDefaults(clusterName, enabled, networkName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_container_cluster" "with_node_pool_defaults" { +func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_in_node_config" { name = "%s" location = "us-central1-f" initial_node_count = 1 - node_pool_defaults { - node_config_defaults { - gcfs_config { - enabled = "%s" - } - } - } + node_config { + logging_variant = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_in_node_pool" { + name = "%s" + location = "us-central1-f" + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + logging_variant = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, nodePoolName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_node_pool_default" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_pool_defaults { + node_config_defaults { + logging_variant = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, 
clusterName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withAdvancedMachineFeaturesInNodePool(clusterName, nodePoolName, networkName, subnetworkName string, nvEnabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_advanced_machine_features_in_node_pool" { + name = "%s" + location = "us-central1-f" + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + machine_type = "c2-standard-4" + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = "%t" + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, nodePoolName, nvEnabled, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withNodePoolDefaults(clusterName, enabled, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_defaults" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_pool_defaults { + node_config_defaults { + gcfs_config { + enabled = "%s" + } + } + } deletion_protection = false network = "%s" subnetwork = "%s" @@ -6952,6 +7271,46 @@ resource "google_container_cluster" "with_autoprovisioning_management" { `, clusterName, autoUpgrade, autoRepair, networkName, subnetworkName) } +func testAccContainerCluster_autoprovisioningLocations(clusterName, networkName, subnetworkName string, locations []string) string { + var autoprovisionLocationsStr string + for i := 0; i < len(locations); i++ { + autoprovisionLocationsStr += fmt.Sprintf("\"%s\",", locations[i]) + } + var apl string + if len(autoprovisionLocationsStr) > 0 { + apl = fmt.Sprintf(` + auto_provisioning_locations = [%s] + `, autoprovisionLocationsStr) + } + + return fmt.Sprintf(` +resource "google_container_cluster" "with_autoprovisioning_locations" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + cluster_autoscaling { + 
enabled = true + + resource_limits { + resource_type = "cpu" + maximum = 2 + } + + resource_limits { + resource_type = "memory" + maximum = 2048 + } + + %s + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, apl, networkName, subnetworkName) +} + func testAccContainerCluster_backendRef(cluster, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "my-backend-service" { @@ -9360,7 +9719,6 @@ resource "google_container_cluster" "primary" { `, name, networkName, subnetworkName) } -{{ if ne $.TargetVersionName `ga` -}} func testAccContainerCluster_withSecretManagerConfigEnabled(name, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "primary" { @@ -9393,7 +9751,6 @@ resource "google_container_cluster" "primary" { } `, name, networkName, subnetworkName) } -{{- end }} func testAccContainerCluster_withLoggingConfigEnabled(name, networkName, subnetworkName string) string { return fmt.Sprintf(` @@ -9607,56 +9964,6 @@ resource "google_container_cluster" "primary" { `, name, name) } -func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(name string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-nw" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "container_subnetwork" { - name = google_compute_network.container_network.name - network = google_compute_network.container_network.name - ip_cidr_range = "10.0.36.0/24" - region = "us-central1" - private_ip_google_access = true - - secondary_ip_range { - range_name = "services-range" - ip_cidr_range = "192.168.1.0/24" - } - - secondary_ip_range { - range_name = "pod-ranges" - ip_cidr_range = "192.168.64.0/22" - } -} - -resource "google_container_cluster" "primary" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - datapath_provider = 
"ADVANCED_DATAPATH" - - network = google_compute_network.container_network.name - subnetwork = google_compute_subnetwork.container_subnetwork.name - ip_allocation_policy { - cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name - services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name - } - - monitoring_config { - enable_components = [] - advanced_datapath_observability_config { - enable_metrics = true - relay_mode = "INTERNAL_VPC_LB" - } - } - deletion_protection = false -} -`, name, name) -} - func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabled(name string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { @@ -9707,56 +10014,6 @@ resource "google_container_cluster" "primary" { `, name, name) } -func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(name string) string { - return fmt.Sprintf(` -resource "google_compute_network" "container_network" { - name = "%s-nw" - auto_create_subnetworks = false -} - -resource "google_compute_subnetwork" "container_subnetwork" { - name = google_compute_network.container_network.name - network = google_compute_network.container_network.name - ip_cidr_range = "10.0.36.0/24" - region = "us-central1" - private_ip_google_access = true - - secondary_ip_range { - range_name = "services-range" - ip_cidr_range = "192.168.1.0/24" - } - - secondary_ip_range { - range_name = "pod-ranges" - ip_cidr_range = "192.168.64.0/22" - } -} - -resource "google_container_cluster" "primary" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - datapath_provider = "ADVANCED_DATAPATH" - - network = google_compute_network.container_network.name - subnetwork = google_compute_subnetwork.container_subnetwork.name - ip_allocation_policy { - cluster_secondary_range_name = 
google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name - services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name - } - - monitoring_config { - enable_components = [] - advanced_datapath_observability_config { - enable_metrics = false - relay_mode = "DISABLED" - } - } - deletion_protection = false -} -`, name, name) -} - func testAccContainerCluster_withSoleTenantGroup(name, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_compute_node_template" "soletenant-tmpl" { @@ -10305,6 +10562,37 @@ func testAccContainerCluster_withWorkloadALTSConfigAutopilot(projectID, name str {{ end }} +func testAccContainerCluster_withAutopilotKubeletConfigBaseline(name string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "with_autopilot_kubelet_config" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + enable_autopilot = true + deletion_protection = false + } +`, name) +} + +func testAccContainerCluster_withAutopilotKubeletConfigUpdates(name, insecureKubeletReadonlyPortEnabled string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "with_autopilot_kubelet_config" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + node_pool_auto_config { + node_kubelet_config { + insecure_kubelet_readonly_port_enabled = "%s" + } + } + + enable_autopilot = true + deletion_protection = false + } +`, name, insecureKubeletReadonlyPortEnabled) +} + func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { return fmt.Sprintf(` data "google_project" "project" { @@ -10817,6 +11105,7 @@ resource "google_container_cluster" "with_autopilot" { } func TestAccContainerCluster_privateRegistry(t *testing.T) { + acctest.SkipIfVcr(t) t.Parallel() clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) @@ -10935,6 +11224,11 @@ 
resource "google_container_cluster" "primary" { network = "%s" subnetwork = "%s" + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } node_pool_defaults { node_config_defaults { containerd_config { @@ -10970,11 +11264,6 @@ resource "google_container_cluster" "primary" { network = "%s" subnetwork = "%s" - node_config { - oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - ] - } node_pool_defaults { node_config_defaults { containerd_config { @@ -11102,3 +11391,158 @@ resource "google_container_cluster" "primary" { } `, secretID, clusterName, networkName, subnetworkName) } + +func TestAccContainerCluster_withProviderDefaultLabels(t *testing.T) { + // The test failed if VCR testing is enabled, because the cached provider config is used. + // With the cached provider config, any changes in the provider default labels will not be applied. + acctest.SkipIfVcr(t) + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withProviderDefaultLabels(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "1"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "default_value1"), + 
resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + { + Config: testAccContainerCluster_resourceLabelsOverridesProviderDefaultLabels(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "2"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.created-by", "terraform"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + { + Config: testAccContainerCluster_moveResourceLabelToProviderDefaultLabels(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "0"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "2"), + 
resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.default_key1", "default_value1"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.created-by", "terraform"), + + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "2"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "resource_labels.%", "0"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "terraform_labels.%", "0"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "effective_labels.%", "0"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection", "resource_labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccContainerCluster_withProviderDefaultLabels(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + resource_labels = { + created-by = "terraform" + } +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_resourceLabelsOverridesProviderDefaultLabels(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource 
"google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + resource_labels = { + created-by = "terraform" + default_key1 = "value1" + } +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_moveResourceLabelToProviderDefaultLabels(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + created-by = "terraform" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl index 00b6b19d17a3..36a609075836 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -526,10 +527,17 @@ func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100ms", networkName, subnetworkName, true, 2048), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100ms", networkName, subnetworkName, "TRUE", true, 
2048), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.cpu_cfs_quota", "true"), + resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", + "node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled", "TRUE"), resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.pod_pids_limit", "2048"), ), @@ -540,10 +548,17 @@ func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", networkName, subnetworkName, false, 1024), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", networkName, subnetworkName, "FALSE", false, 1024), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.cpu_cfs_quota", "false"), + resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", + "node_config.0.kubelet_config.0.insecure_kubelet_readonly_port_enabled", "FALSE"), ), }, { @@ -571,7 +586,7 @@ func TestAccContainerNodePool_withInvalidKubeletCpuManagerPolicy(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", networkName, subnetworkName, true, 1024), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", networkName, subnetworkName,"TRUE", false, 1024), ExpectError: regexp.MustCompile(`.*to be one of \["?static"? "?none"? 
"?"?\].*`), }, }, @@ -1292,39 +1307,6 @@ func TestAccContainerNodePool_regionalClusters(t *testing.T) { }) } -func TestAccContainerNodePool_012_ConfigModeAttr(t *testing.T) { - t.Parallel() - - cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) - np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) - networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") - subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName), - }, - { - ResourceName: "google_container_node_pool.np", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, networkName, subnetworkName), - }, - { - ResourceName: "google_container_node_pool.np", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccContainerNodePool_EmptyGuestAccelerator(t *testing.T) { t.Parallel() @@ -3136,7 +3118,7 @@ resource "google_container_node_pool" "with_sandbox_config" { } {{- end }} -func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period, networkName, subnetworkName string, quota bool, podPidsLimit int) string { +func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period, networkName, subnetworkName, insecureKubeletReadonlyPortEnabled string, quota bool, podPidsLimit int) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -3162,10 +3144,11 @@ resource "google_container_node_pool" "with_kubelet_config" { node_config { image_type = "COS_CONTAINERD" kubelet_config { - cpu_manager_policy = %q - cpu_cfs_quota = %v - 
cpu_cfs_quota_period = %q - pod_pids_limit = %d + cpu_manager_policy = %q + cpu_cfs_quota = %v + cpu_cfs_quota_period = %q + insecure_kubelet_readonly_port_enabled = "%s" + pod_pids_limit = %d } oauth_scopes = [ "https://www.googleapis.com/auth/logging.write", @@ -3174,7 +3157,7 @@ resource "google_container_node_pool" "with_kubelet_config" { logging_variant = "DEFAULT" } } -`, cluster, networkName, subnetworkName, np, policy, quota, period, podPidsLimit) +`, cluster, networkName, subnetworkName, np, policy, quota, period, insecureKubeletReadonlyPortEnabled, podPidsLimit) } func testAccContainerNodePool_withLinuxNodeConfig(cluster, np, tcpMem, networkName, subnetworkName string) string { @@ -3752,58 +3735,6 @@ resource "google_container_node_pool" "np" { `, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_container_cluster" "cluster" { - name = "%s" - location = "us-central1-f" - initial_node_count = 3 - deletion_protection = false - network = "%s" - subnetwork = "%s" -} - -resource "google_container_node_pool" "np" { - name = "%s" - location = "us-central1-f" - cluster = google_container_cluster.cluster.name - initial_node_count = 1 - - node_config { - guest_accelerator { - count = 1 - type = "nvidia-tesla-t4" - } - machine_type = "n1-highmem-4" - } -} -`, cluster, networkName, subnetworkName, np) -} - -func testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, networkName, subnetworkName string) string { - return fmt.Sprintf(` -resource "google_container_cluster" "cluster" { - name = "%s" - location = "us-central1-f" - initial_node_count = 3 - deletion_protection = false - network = "%s" - subnetwork = "%s" -} - -resource "google_container_node_pool" "np" { - name = "%s" - location = "us-central1-f" - cluster = google_container_cluster.cluster.name - initial_node_count = 1 - - node_config { - guest_accelerator = [] - 
} -} -`, cluster, networkName, subnetworkName, np) -} - func testAccContainerNodePool_EmptyGuestAccelerator(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { @@ -4873,6 +4804,7 @@ resource "google_container_node_pool" "np" { } func TestAccContainerNodePool_defaultDriverInstallation(t *testing.T) { + acctest.SkipIfVcr(t) t.Parallel() cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) @@ -4897,13 +4829,17 @@ func TestAccContainerNodePool_defaultDriverInstallation(t *testing.T) { func testAccContainerNodePool_defaultDriverInstallation(cluster, np string) string { return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false - min_master_version = "1.30.1-gke.1329003" + min_master_version = data.google_container_engine_versions.central1a.release_channel_latest_version["RAPID"] release_channel { channel = "RAPID" } @@ -4931,4 +4867,4 @@ resource "google_container_node_pool" "np" { } } `, cluster, np) -} \ No newline at end of file +} diff --git a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl index 0ae3a078f41b..a8d095fe46f7 100644 --- a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl @@ -1349,7 +1349,7 @@ resource "google_dataflow_flex_template_job" "flex_job_kms" { kms_key_name = "%s" } -`, topicName, bucket, crypto_key, job) +`, topicName, bucket, job, crypto_key) } func testAccDataflowFlexTemplateJob_additionalExperiments(job, bucket, topicName string, experiments []string) string { diff 
--git a/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl b/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl index f650c90f298b..35c1c39f2b07 100644 --- a/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl @@ -94,7 +94,7 @@ resource "google_sourcerepo_repository" "git_repository" { resource "google_secret_manager_secret" "secret" { provider = google-beta - secret_id = "secret" + secret_id = "tf-test-secret%{random_suffix}" replication { auto {} diff --git a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go index 20012fab7365..962b8f521239 100644 --- a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go +++ b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go @@ -105,6 +105,8 @@ func TestAccDataprocCluster_basic(t *testing.T) { } func TestAccDataprocVirtualCluster_basic(t *testing.T) { + // Currently failing + acctest.SkipIfVcr(t) t.Parallel() var cluster dataproc.Cluster diff --git a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go index 640d82c9e1e5..8e9c3aed7b54 100644 --- a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go +++ b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go @@ -3,9 +3,9 @@ package dialogflow_test import ( "testing" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) 
func TestAccDialogflowAgent_update(t *testing.T) { diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl index 69fee4e86421..f1ce98b8067b 100644 --- a/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl @@ -328,6 +328,15 @@ func TestAccDNSRecordSet_routingPolicy(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccDnsRecordSet_routingPolicyRegionalL7PrimaryBackupMultipleNoLbType(networkName, proxySubnetName, httpHealthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccDnsRecordSet_routingPolicyCrossRegionL7PrimaryBackup(networkName, backendSubnetName, proxySubnetName, httpHealthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, 300), }, @@ -1065,6 +1074,127 @@ resource "google_dns_record_set" "foobar" { `, networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, zoneName, zoneName, ttl) } +func testAccDnsRecordSet_routingPolicyRegionalL7PrimaryBackupMultipleNoLbType(networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_compute_network" "default" { + name = "%s" +} + +resource "google_compute_subnetwork" "proxy_subnet" { + name = "%s" + ip_cidr_range = "10.100.0.0/24" + region = "us-central1" + purpose = "INTERNAL_HTTPS_LOAD_BALANCER" + role = "ACTIVE" + network = google_compute_network.default.id +} + 
+resource "google_compute_region_health_check" "health_check" { + name = "%s" + region = "us-central1" + + http_health_check { + port = 80 + } +} + +resource "google_compute_region_backend_service" "backend" { + name = "%s" + region = "us-central1" + load_balancing_scheme = "INTERNAL_MANAGED" + protocol = "HTTP" + health_checks = [google_compute_region_health_check.health_check.id] +} + +resource "google_compute_region_url_map" "url_map" { + name = "%s" + region = "us-central1" + default_service = google_compute_region_backend_service.backend.id +} + +resource "google_compute_region_target_http_proxy" "http_proxy" { + name = "%s" + region = "us-central1" + url_map = google_compute_region_url_map.url_map.id +} + +resource "google_compute_forwarding_rule" "default" { + name = "%s" + region = "us-central1" + depends_on = [google_compute_subnetwork.proxy_subnet] + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_http_proxy.http_proxy.id + port_range = "80" + allow_global_access = true + network = google_compute_network.default.name + ip_protocol = "TCP" +} + +resource "google_compute_forwarding_rule" "duplicate" { + name = "%s" + region = "us-central1" + depends_on = [google_compute_subnetwork.proxy_subnet] + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_http_proxy.http_proxy.id + port_range = "80" + allow_global_access = true + network = google_compute_network.default.name + ip_protocol = "TCP" +} + +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" + visibility = "private" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "A" + ttl = %d + + routing_policy { + primary_backup { + trickle_ratio = 0.1 + enable_geo_fencing_for_backups = true + + primary { + internal_load_balancers { + ip_address = google_compute_forwarding_rule.default.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.default.project + region = google_compute_forwarding_rule.default.region + } + + internal_load_balancers { + ip_address = google_compute_forwarding_rule.duplicate.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.duplicate.project + region = google_compute_forwarding_rule.duplicate.region + } + } + + backup_geo { + location = "us-west1" + rrdatas = ["1.2.3.4"] + } + + backup_geo { + location = "asia-east1" + rrdatas = ["5.6.7.8"] + } + } + } +} +`, networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, forwardingRuleName+"-2", zoneName, zoneName, zoneName, ttl) +} + func testAccDnsRecordSet_routingPolicyCrossRegionL7PrimaryBackup(networkName, backendSubnetName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName string, ttl int) string { return fmt.Sprintf(` resource "google_compute_network" "default" { diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl index b8c2fc402273..dc799efdc85d 100644 --- a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl @@ -16,7 +16,7 @@ func TestAccDataSourceGoogleFirebaseAndroidAppConfig(t *testing.T) { context := map[string]interface{}{ 
"project_id": envvar.GetTestProjectFromEnv(), - "package_name": "android.package.app" + acctest.RandString(t, 5), + "package_name": "android.package.app" + acctest.RandString(t, 5), "display_name": "tf-test Display Name AndroidAppConfig DataSource", } diff --git a/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl index 0efe16dc20f4..b3f60174a717 100644 --- a/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl @@ -158,11 +158,13 @@ resource "google_gke_hub_feature_membership" "feature_member_1" { version = "1.18.2" config_sync { source_format = "hierarchy" + enabled = true git { sync_repo = "https://github.com/GoogleCloudPlatform/magic-modules" secret_type = "none" } } + management = "MANAGEMENT_AUTOMATIC" } } diff --git a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl index cb2517c469f4..310b805dcaa2 100644 --- a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl @@ -492,10 +492,53 @@ func TestAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(t *testing.T) ImportState: true, ImportStateVerify: true, }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementEnableAutomaticManagementUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementRemovalUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + 
}, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementAutomaticManagement(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, }, }) } +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementAutomaticManagement(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + fleet_default_member_config { + configmanagement { + management = "MANAGEMENT_AUTOMATIC" + config_sync { + enabled = true + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(context map[string]interface{}) string { return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` resource "google_gke_hub_feature" "feature" { @@ -531,6 +574,7 @@ resource "google_gke_hub_feature" "feature" { fleet_default_member_config { configmanagement { version = "1.16.1" + management = "MANAGEMENT_MANUAL" config_sync { enabled = true prevent_drift = true @@ -551,6 +595,45 @@ resource "google_gke_hub_feature" "feature" { `, context) } +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementEnableAutomaticManagementUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + fleet_default_member_config { + configmanagement { + version = "1.16.1" + management = "MANAGEMENT_AUTOMATIC" + config_sync { + prevent_drift = true + source_format = "unstructured" + oci { + sync_repo = "us-central1-docker.pkg.dev/corp-gke-build-artifacts/acm/configs:latest" + policy_dir = "/acm/nonprod-root/" + secret_type = 
"gcpserviceaccount" + sync_wait_secs = "15" + gcp_service_account_email = "gke-cluster@gke-foo-nonprod.iam.gserviceaccount.com" + } + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementRemovalUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + func TestAccGKEHubFeature_Clusterupgrade(t *testing.T) { // VCR fails to handle batched project services acctest.SkipIfVcr(t) diff --git a/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go b/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go index 3725784c4660..c1227b201afa 100644 --- a/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go +++ b/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go @@ -76,7 +76,7 @@ func TestAccIAM2DenyPolicy_iamDenyPolicyFolderParent(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "parent"}, }, - { + { Config: testAccIAM2DenyPolicy_iamDenyPolicyFolderUpdate(context), }, { diff --git a/mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl b/mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl new file mode 100644 index 000000000000..8d3b03899014 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/resource_kms_crypto_key_test.go.tmpl @@ -0,0 +1,1309 @@ +package kms_test + +import ( + "context" + "fmt" + "testing" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/kms" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +func TestCryptoKeyIdParsing(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + ImportId string + ExpectedError bool + ExpectedTerraformId string + ExpectedCryptoKeyId string + Config *transport_tpg.Config + }{ + "id is in project/location/keyRingName/cryptoKeyName format": { + ImportId: "test-project/us-central1/test-key-ring/test-key-name", + ExpectedError: false, + ExpectedTerraformId: "test-project/us-central1/test-key-ring/test-key-name", + ExpectedCryptoKeyId: "projects/test-project/locations/us-central1/keyRings/test-key-ring/cryptoKeys/test-key-name", + }, + "id is in domain:project/location/keyRingName/cryptoKeyName format": { + ImportId: "example.com:test-project/us-central1/test-key-ring/test-key-name", + ExpectedError: false, + ExpectedTerraformId: "example.com:test-project/us-central1/test-key-ring/test-key-name", + ExpectedCryptoKeyId: "projects/example.com:test-project/locations/us-central1/keyRings/test-key-ring/cryptoKeys/test-key-name", + }, + "id contains name that is longer than 63 characters": { + ImportId: "test-project/us-central1/test-key-ring/can-you-believe-that-this-cryptokey-name-is-this-extravagantly-long", + ExpectedError: true, + }, + "id is in location/keyRingName/cryptoKeyName format": { + ImportId: "us-central1/test-key-ring/test-key-name", + ExpectedError: false, + ExpectedTerraformId: "test-project/us-central1/test-key-ring/test-key-name", + ExpectedCryptoKeyId: "projects/test-project/locations/us-central1/keyRings/test-key-ring/cryptoKeys/test-key-name", + Config: &transport_tpg.Config{Project: 
"test-project"}, + }, + "id is in location/keyRingName/cryptoKeyName format without project in config": { + ImportId: "us-central1/test-key-ring/test-key-name", + ExpectedError: true, + Config: &transport_tpg.Config{Project: ""}, + }, + } + + for tn, tc := range cases { + cryptoKeyId, err := kms.ParseKmsCryptoKeyId(tc.ImportId, tc.Config) + + if tc.ExpectedError && err == nil { + t.Fatalf("bad: %s, expected an error", tn) + } + + if err != nil { + if tc.ExpectedError { + continue + } + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if cryptoKeyId.TerraformId() != tc.ExpectedTerraformId { + t.Fatalf("bad: %s, expected Terraform ID to be `%s` but is `%s`", tn, tc.ExpectedTerraformId, cryptoKeyId.TerraformId()) + } + + if cryptoKeyId.CryptoKeyId() != tc.ExpectedCryptoKeyId { + t.Fatalf("bad: %s, expected CryptoKey ID to be `%s` but is `%s`", tn, tc.ExpectedCryptoKeyId, cryptoKeyId.CryptoKeyId()) + } + } +} + +func TestCryptoKeyStateUpgradeV0(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Attributes map[string]interface{} + Expected map[string]string + Meta interface{} + }{ + "change key_ring from terraform id fmt to link fmt": { + Attributes: map[string]interface{}{ + "key_ring": "my-project/my-location/my-key-ring", + }, + Expected: map[string]string{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Meta: &transport_tpg.Config{}, + }, + "key_ring link fmt stays as link fmt": { + Attributes: map[string]interface{}{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Expected: map[string]string{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Meta: &transport_tpg.Config{}, + }, + "key_ring without project to link fmt": { + Attributes: map[string]interface{}{ + "key_ring": "my-location/my-key-ring", + }, + Expected: map[string]string{ + "key_ring": "projects/my-project/locations/my-location/keyRings/my-key-ring", + }, + Meta: 
&transport_tpg.Config{ + Project: "my-project", + }, + }, + } + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + actual, err := kms.ResourceKMSCryptoKeyUpgradeV0(context.Background(), tc.Attributes, tc.Meta) + + if err != nil { + t.Error(err) + } + + for k, v := range tc.Expected { + if actual[k] != v { + t.Errorf("expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + k, v, k, actual[k], actual) + } + } + }) + } +} + +func TestAccKmsCryptoKey_basic(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_kms_crypto_key.crypto_key", "primary.0.name"), + ), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Test importing with a short id + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateId: fmt.Sprintf("%s/%s/%s/%s", projectId, location, keyRingName, cryptoKeyName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +func TestAccKmsCryptoKey_rotation(t *testing.T) { + // when rotation is set, next rotation time is set using time.Now + acctest.SkipIfVcr(t) + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + rotationPeriod := "100000s" + updatedRotationPeriod := "7776000s" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_rotation(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, rotationPeriod), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testGoogleKmsCryptoKey_rotation(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedRotationPeriod), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testGoogleKmsCryptoKey_rotationRemoved(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + // Use a 
separate TestStep rather than a CheckDestroy because we need the project to still exist. + { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +func TestAccKmsCryptoKey_template(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + algorithm := "EC_SIGN_P256_SHA256" + updatedAlgorithm := "EC_SIGN_P384_SHA384" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_template(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, algorithm), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testGoogleKmsCryptoKey_template(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedAlgorithm), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +func TestAccKmsCryptoKey_destroyDuration(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_destroyDuration(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsCryptoKey_keyAccessJustificationsPolicy(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + allowedAccessReason := "CUSTOMER_INITIATED_SUPPORT" + updatedAllowedAccessReason := "GOOGLE_INITIATED_SERVICE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowedAccessReason), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedAllowedAccessReason), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a 
CheckDestroy because we need the project to still exist. + { + Config: testGoogleKmsCryptoKey_removedBeta(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} +{{- end }} + +func TestAccKmsCryptoKey_importOnly(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_importOnly(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"skip_initial_version_creation", "labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. 
+ { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + +// KMS KeyRings cannot be deleted. This ensures that the CryptoKey resource was removed from state, +// even though the server-side resource was not removed. +func testAccCheckGoogleKmsCryptoKeyWasRemovedFromState(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[resourceName] + + if ok { + return fmt.Errorf("Resource was not removed from state: %s", resourceName) + } + + return nil + } +} + +// KMS KeyRings cannot be deleted. This ensures that the CryptoKey resource's CryptoKeyVersion +// sub-resources were scheduled to be destroyed, rendering the key itself inoperable. 
+func testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t *testing.T, projectId, location, keyRingName, cryptoKeyName string) resource.TestCheckFunc { + return func(_ *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + gcpResourceUri := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", projectId, location, keyRingName, cryptoKeyName) + + response, err := config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions.List(gcpResourceUri).Do() + + if err != nil { + return fmt.Errorf("Unexpected failure to list versions: %s", err) + } + + versions := response.CryptoKeyVersions + + for _, v := range versions { + if v.State != "DESTROY_SCHEDULED" && v.State != "DESTROYED" { + return fmt.Errorf("CryptoKey %s should have no versions, but version %s has state %s", cryptoKeyName, v.Name, v.State) + } + } + + return nil + } +} + +// KMS KeyRings cannot be deleted. This ensures that the CryptoKey autorotation +// was disabled to prevent more versions of the key from being created. 
+func testAccCheckGoogleKmsCryptoKeyRotationDisabled(t *testing.T, projectId, location, keyRingName, cryptoKeyName string) resource.TestCheckFunc { + return func(_ *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + gcpResourceUri := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", projectId, location, keyRingName, cryptoKeyName) + + response, err := config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.CryptoKeys.Get(gcpResourceUri).Do() + if err != nil { + return fmt.Errorf("Unexpected failure while verifying 'deleted' crypto key: %s", err) + } + + if response.NextRotationTime != "" { + return fmt.Errorf("Expected empty nextRotationTime for 'deleted' crypto key, got %s", response.NextRotationTime) + } + if response.RotationPeriod != "" { + return fmt.Errorf("Expected empty RotationPeriod for 'deleted' crypto key, got %s", response.RotationPeriod) + } + + return nil + } +} + +func TestAccKmsCryptoKeyVersion_basic(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + }, + }) +} + +func 
TestAccKmsCryptoKeyVersionWithSymmetricHSM(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersionWithSymmetricHSM(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + }, + }) +} + +func TestAccKmsCryptoKeyVersion_skipInitialVersion(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_skipInitialVersion(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func 
TestAccKmsCryptoKeyVersion_patch(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + state := "DISABLED" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_patchInitialize(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_patch("true", projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, state), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_patch("false", projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, state), + }, + }, + }) +} + +func TestAccKmsCryptoKeyVersion_externalProtectionLevelOptions(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyUri := "data.google_secret_manager_secret_version.key_uri.secret_data" + updatedKeyUri := "data.google_secret_manager_secret_version.key_uri_updated.secret_data" + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptions(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, keyUri), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptions(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedKeyUri), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(t *testing.T) { + // This test relies on manual steps to set up the EkmConnection used for the + // CryptoKeyVersion creation, which means we can't spin up a temporary project. + // We also can't use bootstrapped keys because that would defeat the purpose of + // this key creation test, so we skip this test for VCR to avoid KMS resource + // accumulation in the TF test project (since KMS resources can't be deleted). 
+ acctest.SkipIfVcr(t) + t.Parallel() + + projectId := envvar.GetTestProjectFromEnv() + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + ekmConnectionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyPath := "data.google_secret_manager_secret_version.key_path.secret_data" + updatedKeyPath := "data.google_secret_manager_secret_version.key_path_updated.secret_data" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(projectId, keyRingName, cryptoKeyName, ekmConnectionName, keyPath), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(projectId, keyRingName, cryptoKeyName, ekmConnectionName, updatedKeyPath), + }, + { + ResourceName: "google_kms_crypto_key_version.crypto_key_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +// This test runs in its own project, otherwise the test project would start to get filled +// with undeletable resources +func testGoogleKmsCryptoKey_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = 
google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKey_rotation(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, rotationPeriod string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + rotation_period = "%s" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, rotationPeriod) +} + +func testGoogleKmsCryptoKey_rotationRemoved(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func 
testGoogleKmsCryptoKey_template(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, algorithm string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ASYMMETRIC_SIGN" + + version_template { + algorithm = "%s" + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, algorithm) +} + +func testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testGoogleKmsCryptoKey_removedBeta(projectId, projectOrg, projectBillingAccount, keyRingName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + provider = google-beta + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + provider = google-beta + project = 
google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + provider = google-beta + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName) +} +{{- end }} + +func testGoogleKmsCryptoKey_destroyDuration(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + destroy_scheduled_duration = "129600s" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowed_access_reason string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + provider = google-beta + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + provider = google-beta + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + provider = google-beta + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" 
"crypto_key" { + provider = google-beta + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + key_access_justifications_policy { + allowed_access_reasons = ["%s"] + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowed_access_reason) +} +{{- end }} + +func testGoogleKmsCryptoKey_importOnly(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + skip_initial_version_creation = true + import_only = true +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_basic(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} + +resource 
"google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersionWithSymmetricHSM(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + version_template { + algorithm = "GOOGLE_SYMMETRIC_ENCRYPTION" + protection_level = "HSM" + } +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_removed(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} 
+`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_skipInitialVersion(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + skip_initial_version_creation = true +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} +func testGoogleKmsCryptoKeyVersion_patchInitialize(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + lifecycle { + prevent_destroy = true 
+ } + state = "ENABLED" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +func testGoogleKmsCryptoKeyVersion_patch(preventDestroy, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, state string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + labels = { + key = "value" + } +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + lifecycle { + prevent_destroy = %s + } + state = "%s" +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, preventDestroy, state) +} + +func testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptions(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, keyUri string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + deletion_policy = "DELETE" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + + version_template { + algorithm = "EXTERNAL_SYMMETRIC_ENCRYPTION" + protection_level = 
"EXTERNAL" + } + + labels = { + key = "value" + } + skip_initial_version_creation = true +} + +data "google_secret_manager_secret_version" "key_uri" { + secret = "external-full-key-uri" + project = "315636579862" +} +data "google_secret_manager_secret_version" "key_uri_updated" { + secret = "external-full-key-uri-update-test" + project = "315636579862" +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + external_protection_level_options { + external_key_uri = %s + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, keyUri) +} + +// EkmConnection setup and creation is based off of resource_kms_ekm_connection_test.go +func testGoogleKmsCryptoKeyVersion_externalProtectionLevelOptionsVpc(projectId, keyRingName, cryptoKeyName, ekmConnectionName, keyPath string) string { + return fmt.Sprintf(` +data "google_project" "vpc-project" { + project_id = "cloud-ekm-refekm-playground" +} +data "google_project" "project" { + project_id = "%s" +} + +data "google_secret_manager_secret_version" "raw_der" { + secret = "playground-cert" + project = "315636579862" +} +data "google_secret_manager_secret_version" "hostname" { + secret = "external-uri" + project = "315636579862" +} +data "google_secret_manager_secret_version" "servicedirectoryservice" { + secret = "external-servicedirectoryservice" + project = "315636579862" +} + +resource "google_project_iam_member" "add_sdviewer" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.viewer" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} +resource "google_project_iam_member" "add_pscAuthorizedService" { + project = data.google_project.vpc-project.number + role = "roles/servicedirectory.pscAuthorizedService" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-ekms.iam.gserviceaccount.com" +} + +resource 
"google_kms_ekm_connection" "example-ekmconnection" { + name = "%s" + location = "us-central1" + key_management_mode = "MANUAL" + service_resolvers { + service_directory_service = data.google_secret_manager_secret_version.servicedirectoryservice.secret_data + hostname = data.google_secret_manager_secret_version.hostname.secret_data + server_certificates { + raw_der = data.google_secret_manager_secret_version.raw_der.secret_data + } + } + depends_on = [ + google_project_iam_member.add_pscAuthorizedService, + google_project_iam_member.add_sdviewer + ] +} + +resource "google_kms_key_ring" "key_ring" { + project = data.google_project.project.project_id + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + + version_template { + algorithm = "EXTERNAL_SYMMETRIC_ENCRYPTION" + protection_level = "EXTERNAL_VPC" + } + + labels = { + key = "value" + } + crypto_key_backend = google_kms_ekm_connection.example-ekmconnection.id + skip_initial_version_creation = true +} + +data "google_secret_manager_secret_version" "key_path" { + secret = "external-keypath" + project = "315636579862" +} +data "google_secret_manager_secret_version" "key_path_updated" { + secret = "external-keypath-update-test" + project = "315636579862" +} + +resource "google_kms_crypto_key_version" "crypto_key_version" { + crypto_key = google_kms_crypto_key.crypto_key.id + external_protection_level_options { + ekm_connection_key_path = %s + } +} +`, projectId, ekmConnectionName, keyRingName, cryptoKeyName, keyPath) +} diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl index e32616174019..45106b1dbdec 100644 --- a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl +++ 
b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl @@ -10,6 +10,7 @@ import ( ) func TestAccNetworkSecurityClientTlsPolicy_update(t *testing.T) { + acctest.SkipIfVcr(t) t.Parallel() clientTlsPolicyName := fmt.Sprintf("tf-test-client-tls-policy-%s", acctest.RandString(t, 10)) diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl index 916f326ac9d0..f2c0987c06ce 100644 --- a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl @@ -74,6 +74,7 @@ resource "google_network_services_tcp_route" "foobar" { weight = 1 } original_destination = false + idle_timeout = "60s" } } } @@ -111,6 +112,7 @@ resource "google_network_services_tcp_route" "foobar" { weight = 1 } original_destination = false + idle_timeout = "120s" } } } diff --git a/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl b/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl index 0ffa5fc7ee63..740f2f5dde37 100644 --- a/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl @@ -1,3 +1,21 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + + package parallelstore_test {{ if ne $.TargetVersionName `ga` -}} @@ -5,8 +23,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - - "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/acctest" ) func TestAccParallelstoreInstance_parallelstoreInstanceBasicExample_update(t *testing.T) { @@ -53,6 +70,8 @@ resource "google_parallelstore_instance" "instance" { capacity_gib = 12000 network = google_compute_network.network.name reserved_ip_range = google_compute_global_address.private_ip_alloc.name + file_stripe_level = "FILE_STRIPE_LEVEL_MIN" + directory_stripe_level = "DIRECTORY_STRIPE_LEVEL_MIN" labels = { test = "value" } diff --git a/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl index 6009bc618f23..960e6cce3250 100644 --- a/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl @@ -23,7 +23,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, nodeType: 
"REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "TUESDAY", maintenanceHours: 2, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -33,7 +33,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "TUESDAY", maintenanceHours: 2, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -82,7 +82,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -92,7 +92,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 2 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + 
Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -102,7 +102,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 0 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -112,7 +112,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -131,7 +131,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with shard count 3 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: 
"MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -141,7 +141,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // update shard count to 5 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: true, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, { ResourceName: "google_redis_cluster.test", @@ -151,7 +151,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, deletionProtectionEnabled: false, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, }) @@ -174,6 +174,11 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", + maintenanceDay: "MONDAY", + maintenanceHours: 1, + maintenanceMinutes: 0, + maintenanceSeconds: 0, + maintenanceNanos: 0, redisConfigs: map[string]string{ "maxmemory-policy": "volatile-ttl", }}), @@ -190,6 +195,11 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", + maintenanceDay: "MONDAY", + maintenanceHours: 1, + maintenanceMinutes: 0, + maintenanceSeconds: 0, + maintenanceNanos: 0, redisConfigs: map[string]string{ "maxmemory-policy": "allkeys-lru", 
"maxmemory-clients": "90%", @@ -203,7 +213,7 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { }, { // remove all redis configs - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE", maintenanceDay: "MONDAY", maintenanceHours: 1, maintenanceMinutes: 0, maintenanceSeconds: 0, maintenanceNanos: 0}), }, }, @@ -259,6 +269,11 @@ type ClusterParams struct { redisConfigs map[string]string zoneDistributionMode string zone string + maintenanceDay string + maintenanceHours int + maintenanceMinutes int + maintenanceSeconds int + maintenanceNanos int } func createOrUpdateRedisCluster(params *ClusterParams) string { @@ -277,6 +292,23 @@ func createOrUpdateRedisCluster(params *ClusterParams) string { `, params.zoneDistributionMode, params.zone) } + maintenancePolicyBlock := `` + if params.maintenanceDay != "" { + maintenancePolicyBlock = fmt.Sprintf(` + maintenance_policy { + weekly_maintenance_window { + day = "%s" + start_time { + hours = %d + minutes = %d + seconds = %d + nanos = %d + } + } + } + `, params.maintenanceDay, params.maintenanceHours, params.maintenanceMinutes, params.maintenanceSeconds, params.maintenanceNanos) + } + return fmt.Sprintf(` resource "google_redis_cluster" "test" { provider = google-beta @@ -292,6 +324,7 @@ resource "google_redis_cluster" "test" { redis_configs = { %s } + %s %s depends_on = [ google_network_connectivity_service_connection_policy.default @@ -323,7 +356,7 @@ resource "google_compute_network" "producer_net" { name = "%s" auto_create_subnetworks = false } -`, params.name, params.replicaCount, params.shardCount, params.nodeType, params.deletionProtectionEnabled, strBuilder.String(), zoneDistributionConfigBlock, params.name, params.name, params.name) +`, params.name, params.replicaCount, params.shardCount, params.nodeType, 
params.deletionProtectionEnabled, strBuilder.String(), zoneDistributionConfigBlock, maintenancePolicyBlock, params.name, params.name, params.name) } {{ end }} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go index c0a89bace550..8f4ecf70057b 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go @@ -3,10 +3,9 @@ package resourcemanager_test import ( "fmt" "regexp" - "testing" - "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) @@ -267,7 +266,7 @@ func TestAccProjectIamBinding_invalidMembers(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccProjectAssociateBindingBasic(pid, org, role, "admin@hashicorptest.com"), + Config: testAccProjectAssociateBindingBasic(pid, org, role, "admin@hashicorptest.com"), ExpectError: regexp.MustCompile("invalid value for members\\.0 \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), }, { diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go index 019e75c5029c..b02cb66743c5 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go @@ -3,10 +3,9 @@ package resourcemanager_test import ( "fmt" "regexp" - 
"testing" - "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) @@ -183,7 +182,7 @@ func TestAccProjectIamMember_invalidMembers(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccProjectAssociateMemberBasic(pid, org, role, "admin@hashicorptest.com"), + Config: testAccProjectAssociateMemberBasic(pid, org, role, "admin@hashicorptest.com"), ExpectError: regexp.MustCompile("invalid value for member \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), }, { diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go index 9b923080d453..2324eca039f6 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go @@ -55,7 +55,7 @@ func TestAccProjectIamPolicy_emptyMembers(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { @@ -72,7 +72,7 @@ func TestAccProjectIamPolicy_expanded(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: 
[]resource.TestStep{ { @@ -92,7 +92,7 @@ func TestAccProjectIamPolicy_basicAuditConfig(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ // Create a new project @@ -122,7 +122,7 @@ func TestAccProjectIamPolicy_expandedAuditConfig(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { @@ -141,7 +141,7 @@ func TestAccProjectIamPolicy_withCondition(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ // Create a new project @@ -176,7 +176,7 @@ func TestAccProjectIamPolicy_invalidMembers(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccProjectAssociatePolicyBasic(pid, org, "admin@hashicorptest.com"), + Config: testAccProjectAssociatePolicyBasic(pid, org, "admin@hashicorptest.com"), ExpectError: regexp.MustCompile("invalid value for bindings\\.1\\.members\\.0 \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), }, { diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl 
b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl index d1a304e96938..ff1fd2a8fb7e 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service.go.tmpl @@ -1,22 +1,24 @@ package resourcemanager import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - tpgserviceusage "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "github.com/hashicorp/terraform-provider-google/google/verify" - - "google.golang.org/api/googleapi" - "google.golang.org/api/serviceusage/v1" + "fmt" + "log" + "strings" + "time" + {{- if ne $.TargetVersionName "ga" }} + "regexp" + {{- end }} + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + tpgserviceusage "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" + "google.golang.org/api/serviceusage/v1" ) // These services can only be enabled as a side-effect of enabling other services, @@ -332,11 +334,11 @@ func disableServiceUsageProjectService(service, project string, d *schema.Resour ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.ServiceUsageServiceBeingActivated}, }) if err != nil { - {{- if not (eq $.TargetVersionName "ga") }} + {{- if ne $.TargetVersionName "ga" }} 
if res, _ := regexp.MatchString("COMMON_SU_SERVICE_HAS_USAGE", err.Error()); res { - return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, strings.Replace(err.Error(), "check_if_service_has_usage=SKIP", "check_if_service_has_usage_on_destroy=false", -1)) + return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, strings.Replace(err.Error(), "check_if_service_has_usage=SKIP", "check_if_service_has_usage_on_destroy=false", -1)) } - {{- end }} + {{- end }} return fmt.Errorf("Error disabling service %q for project %q: %v", service, project, err) } return nil diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl index 57d13174696f..c5fdaded7489 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_service_test.go.tmpl @@ -37,21 +37,21 @@ func TestAccProjectService_basic(t *testing.T) { ResourceName: "google_project_service.test", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy"}, + {{- end }} }, { ResourceName: "google_project_service.test2", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "project", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: 
[]string{"disable_on_destroy", "project"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "project", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "project"}, + {{- end }} }, // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. { @@ -99,11 +99,11 @@ func TestAccProjectService_disableDependentServices(t *testing.T) { ResourceName: "google_project_service.test", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy"}, + {{- end }} }, { Config: testAccProjectService_dependencyRemoved(services, pid, org, billingId), @@ -116,11 +116,11 @@ func TestAccProjectService_disableDependentServices(t *testing.T) { ResourceName: "google_project_service.test", ImportState: true, ImportStateVerify: true, - {{- if ne $.TargetVersionName "ga" }} - ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, - {{- else }} - ImportStateVerifyIgnore: []string{"disable_on_destroy"}, - {{- end }} + {{- if ne $.TargetVersionName "ga" }} + ImportStateVerifyIgnore: []string{"disable_on_destroy", "check_if_service_has_usage_on_destroy"}, + {{- else }} + ImportStateVerifyIgnore: []string{"disable_on_destroy"}, + {{- end }} }, { Config: testAccProjectService_dependencyRemoved(services, pid, org, billingId), @@ -387,6 +387,7 @@ resource "google_project_service" "test" { func testAccProjectService_checkUsage(service string, pid, org string, checkIfServiceHasUsage 
string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { + provider = google-beta project_id = "%s" name = "%s" org_id = "%s" @@ -394,6 +395,7 @@ resource "google_project" "acceptance" { } resource "google_project_service" "test" { + provider = google-beta project = google_project.acceptance.project_id service = "%s" diff --git a/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl b/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl new file mode 100644 index 000000000000..d85d72dd999a --- /dev/null +++ b/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl @@ -0,0 +1,2553 @@ +package sql + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" + sqladmin "google.golang.org/api/sqladmin/v1beta4" +) + +// Match fully-qualified or relative URLs +const privateNetworkLinkRegex = "^(?:http(?:s)?://.+/)?projects/(" + verify.ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" + +var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiration_time": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: 
true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, +} + +var sqlDatabaseFlagSchemaElem *schema.Resource = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Required: true, + Description: `Value of the flag.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the flag.`, + }, + }, +} + +var ( + backupConfigurationKeys = []string{ + "settings.0.backup_configuration.0.binary_log_enabled", + "settings.0.backup_configuration.0.enabled", + "settings.0.backup_configuration.0.start_time", + "settings.0.backup_configuration.0.location", + "settings.0.backup_configuration.0.point_in_time_recovery_enabled", + "settings.0.backup_configuration.0.backup_retention_settings", + "settings.0.backup_configuration.0.transaction_log_retention_days", + } + + ipConfigurationKeys = []string{ + "settings.0.ip_configuration.0.authorized_networks", + "settings.0.ip_configuration.0.ipv4_enabled", + "settings.0.ip_configuration.0.private_network", + "settings.0.ip_configuration.0.allocated_ip_range", + "settings.0.ip_configuration.0.enable_private_path_for_google_cloud_services", + "settings.0.ip_configuration.0.psc_config", + "settings.0.ip_configuration.0.ssl_mode", + "settings.0.ip_configuration.0.server_ca_mode", + } + + maintenanceWindowKeys = []string{ + "settings.0.maintenance_window.0.day", + "settings.0.maintenance_window.0.hour", + "settings.0.maintenance_window.0.update_track", + } + + replicaConfigurationKeys = []string{ + "replica_configuration.0.ca_certificate", + "replica_configuration.0.client_certificate", + "replica_configuration.0.client_key", + "replica_configuration.0.connect_retry_interval", + "replica_configuration.0.dump_file_path", + "replica_configuration.0.failover_target", + "replica_configuration.0.master_heartbeat_period", + "replica_configuration.0.password", + "replica_configuration.0.ssl_cipher", + "replica_configuration.0.username", + 
"replica_configuration.0.verify_server_certificate", + } + + insightsConfigKeys = []string{ + "settings.0.insights_config.0.query_insights_enabled", + "settings.0.insights_config.0.query_string_length", + "settings.0.insights_config.0.record_application_tags", + "settings.0.insights_config.0.record_client_address", + "settings.0.insights_config.0.query_plans_per_minute", + } + + sqlServerAuditConfigurationKeys = []string{ + "settings.0.sql_server_audit_config.0.bucket", + "settings.0.sql_server_audit_config.0.retention_interval", + "settings.0.sql_server_audit_config.0.upload_interval", + } +) + +func ResourceSqlDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseInstanceCreate, + Read: resourceSqlDatabaseInstanceRead, + Update: resourceSqlDatabaseInstanceUpdate, + Delete: resourceSqlDatabaseInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceSqlDatabaseInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + customdiff.ForceNewIfChange("settings.0.disk_size", compute.IsDiskShrinkage), + customdiff.ForceNewIfChange("master_instance_name", isMasterInstanceNameSet), + customdiff.IfValueChange("instance_type", isReplicaPromoteRequested, checkPromoteConfigurationsAndUpdateDiff), + privateNetworkCustomizeDiff, + pitrSupportDbCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The region the instance will sit in. Note, Cloud SQL is not available in all regions. A valid region must be provided to use this resource. 
If a region is not provided in the resource definition, the provider region will be used instead, but this will be an apply-time error for instances if the provider region is not supported with Cloud SQL. If you choose not to provide the region argument for this resource, make sure you understand this.`, + }, + "deletion_protection": { + Type: schema.TypeBool, + Default: true, + Optional: true, + Description: `Used to block Terraform from deleting a SQL Instance. Defaults to true.`, + }, + "settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: []string{"settings", "clone"}, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": { + Type: schema.TypeInt, + Computed: true, + Description: `Used to make sure changes to the settings block are atomic.`, + }, + "tier": { + Type: schema.TypeString, + Required: true, + Description: `The machine type to use. See tiers for more details and supported versions. Postgres supports only shared-core machine types, and custom machine types such as db-custom-2-13312. See the Custom Machine Type Documentation to learn about specifying custom machine types.`, + }, + "edition": { + Type: schema.TypeString, + Optional: true, + Default: "ENTERPRISE", + ValidateFunc: validation.StringInSlice([]string{"ENTERPRISE", "ENTERPRISE_PLUS"}, false), + Description: `The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS.`, + }, + "advanced_machine_features": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of threads per physical core. 
Can be 1 or 2.`, + }, + }, + }, + }, + "data_cache_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Data cache configurations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_cache_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether data cache is enabled for the instance.`, + }, + }, + }, + }, + "activation_policy": { + Type: schema.TypeString, + Optional: true, + Default: "ALWAYS", + Description: `This specifies when the instance should be active. Can be either ALWAYS, NEVER or ON_DEMAND.`, + }, + "active_directory_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + Description: `Domain name of the Active Directory for SQL Server (e.g., mydomain.com).`, + }, + }, + }, + }, + "deny_maintenance_period": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_date": { + Type: schema.TypeString, + Required: true, + Description: `End date before which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01`, + }, + "start_date": { + Type: schema.TypeString, + Required: true, + Description: `Start date after which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01`, + }, + "time": { + Type: schema.TypeString, + Required: true, + Description: `Time in UTC when the "deny maintenance period" starts on start_date and ends on end_date. 
The time is in format: HH:mm:SS, i.e., 00:00:00`, + }, + }, + }, + }, + "sql_server_audit_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `The name of the destination bucket (e.g., gs://mybucket).`, + }, + "retention_interval": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s"..`, + }, + "upload_interval": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + }, + }, + }, + "time_zone": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: `The time_zone to be used by the database engine (supported only for SQL Server), in SQL Server timezone format.`, + }, + "availability_type": { + Type: schema.TypeString, + Optional: true, + Default: "ZONAL", + ValidateFunc: validation.StringInSlice([]string{"REGIONAL", "ZONAL"}, false), + Description: `The availability type of the Cloud SQL instance, high availability +(REGIONAL) or single zone (ZONAL). For all instances, ensure that +settings.backup_configuration.enabled is set to true. +For MySQL instances, ensure that settings.backup_configuration.binary_log_enabled is set to true. +For Postgres instances, ensure that settings.backup_configuration.point_in_time_recovery_enabled +is set to true. 
Defaults to ZONAL.`, + }, + "backup_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "binary_log_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if binary logging is enabled. If settings.backup_configuration.enabled is false, this must be as well. Can only be used with MySQL.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if backup configuration is enabled.`, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + // start_time is randomly assigned if not set + Computed: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `HH:MM format time indicating when backup configuration starts.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `Location of the backup configuration.`, + }, + "point_in_time_recovery_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if Point-in-time recovery is enabled.`, + }, + "transaction_log_retention_days": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `The number of days of transaction logs we retain for point in time restore, from 1-7. 
(For PostgreSQL Enterprise Plus instances, from 1 to 35.)`, + }, + "backup_retention_settings": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retained_backups": { + Type: schema.TypeInt, + Required: true, + Description: `Number of backups to retain.`, + }, + "retention_unit": { + Type: schema.TypeString, + Optional: true, + Default: "COUNT", + Description: `The unit that 'retainedBackups' represents. Defaults to COUNT`, + }, + }, + }, + }, + }, + }, + }, + "collation": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of server instance collation.`, + }, + "database_flags": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseFlagSchemaElem), + Elem: sqlDatabaseFlagSchemaElem, + }, + "disk_autoresize": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Enables auto-resizing of the storage size. Defaults to true.`, + }, + "disk_autoresize_limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: `The maximum size, in GB, to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit.`, + }, + "enable_google_ml_integration": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Vertex AI Integration.`, + }, + "enable_dataplex_integration": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Dataplex Integration.`, + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + // Default is likely 10gb, but it is undocumented and may change. + Computed: true, + Description: `The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased. 
The minimum value is 10GB.`, + }, + "disk_type": { + Type: schema.TypeString, + Optional: true, + Default: "PD_SSD", + ForceNew: true, + DiffSuppressFunc: caseDiffDashSuppress, + Description: `The type of data disk: PD_SSD or PD_HDD. Defaults to PD_SSD.`, + }, + "ip_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_networks": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), + Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + AtLeastOneOf: ipConfigurationKeys, + }, + "ipv4_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `Whether this Cloud SQL instance should be assigned a public IPV4 address. At least ipv4_enabled must be enabled or a private_network must be configured.`, + }, + "private_network": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.OrEmpty(verify.ValidateRegexp(privateNetworkLinkRegex)), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + AtLeastOneOf: ipConfigurationKeys, + Description: `The VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. Specifying a network enables private IP. At least ipv4_enabled must be enabled or a private_network must be configured. This setting can be updated, but it cannot be removed after it is set.`, + }, + "allocated_ip_range": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the instance ip will be created in the allocated range. The range name must comply with RFC 1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?.`, + }, + "enable_private_path_for_google_cloud_services": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `Whether Google Cloud services such as BigQuery are allowed to access data in this Cloud SQL instance over a private IP connection. SQLSERVER database type is not supported.`, + }, + "psc_config": { + Type: schema.TypeSet, + Optional: true, + Description: `PSC settings for a Cloud SQL instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "psc_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether PSC connectivity is enabled for this instance.`, + }, + "allowed_consumer_projects": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + Description: `List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. 
Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric).`, + }, + }, + }, + }, + "ssl_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"ALLOW_UNENCRYPTED_AND_ENCRYPTED", "ENCRYPTED_ONLY", "TRUSTED_CLIENT_CERTIFICATE_REQUIRED"}, false), + Description: `Specify how SSL connection should be enforced in DB connections.`, + AtLeastOneOf: ipConfigurationKeys, + }, + "server_ca_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"CA_MODE_UNSPECIFIED", "GOOGLE_MANAGED_INTERNAL_CA", "GOOGLE_MANAGED_CAS_CA"}, false), + Description: `Specify how the server certificate's Certificate Authority is hosted.`, + AtLeastOneOf: ipConfigurationKeys, + }, + }, + }, + }, + "location_preference": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "follow_gae_application": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, + Description: `A Google App Engine application whose zone to remain in. 
Must be in the same region as this instance.`, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, + Description: `The preferred compute engine zone.`, + }, + "secondary_zone": { + Type: schema.TypeString, + Optional: true, + Description: `The preferred Compute Engine zone for the secondary/failover`, + }, + }, + }, + }, + "maintenance_window": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 7), + AtLeastOneOf: maintenanceWindowKeys, + Description: `Day of week (1-7), starting on Monday`, + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + AtLeastOneOf: maintenanceWindowKeys, + Description: `Hour of day (0-23), ignored if day not set`, + }, + "update_track": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: maintenanceWindowKeys, + Description: `Receive updates after one week (canary) or after two weeks (stable) or after five weeks (week5) of notification.`, + }, + }, + }, + Description: `Declares a one-hour maintenance window when an Instance can automatically restart to apply updates. 
The maintenance window is specified in UTC time.`, + }, + "pricing_plan": { + Type: schema.TypeString, + Optional: true, + Default: "PER_USE", + Description: `Pricing plan for this instance, can only be PER_USE.`, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value user label pairs to assign to the instance.`, + }, + "insights_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_insights_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights feature is enabled.`, + }, + "query_string_length": { + Type: schema.TypeInt, + Optional: true, + Default: 1024, + ValidateFunc: validation.IntBetween(256, 4500), + AtLeastOneOf: insightsConfigKeys, + Description: `Maximum query length stored in bytes. Between 256 and 4500. Default to 1024.`, + }, + "record_application_tags": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights will record application tags from query when enabled.`, + }, + "record_client_address": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights will record client address when enabled.`, + }, + "query_plans_per_minute": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 20), + AtLeastOneOf: insightsConfigKeys, + Description: `Number of query execution plans captured by Insights per minute for all queries combined. Between 0 and 20. 
Default to 5.`, + }, + }, + }, + Description: `Configuration of Query Insights.`, + }, + "password_validation_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_length": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Minimum number of characters allowed.`, + }, + "complexity": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"COMPLEXITY_DEFAULT", "COMPLEXITY_UNSPECIFIED"}, false), + Description: `Password complexity.`, + }, + "reuse_interval": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Number of previous passwords that cannot be reused.`, + }, + "disallow_username_substring": { + Type: schema.TypeBool, + Optional: true, + Description: `Disallow username as a part of the password.`, + }, + "password_change_interval": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum interval after which the password can be changed. This flag is only supported for PostgresSQL.`, + }, + "enable_password_policy": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the password policy is enabled or not.`, + }, + }, + }, + }, + "connector_enforcement": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"NOT_REQUIRED", "REQUIRED"}, false), + Description: `Enables the enforcement of Cloud SQL Auth Proxy or Cloud SQL connectors for all the connections. If enabled, all the direct connections are rejected.`, + }, + "deletion_protection_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Configuration to protect against accidental instance deletion.`, + }, + }, + }, + Description: `The settings to use for the database. 
The configuration is detailed below.`, + }, + + "connection_name": { + Type: schema.TypeString, + Computed: true, + Description: `The connection name of the instance to be used in connection strings. For example, when connecting with Cloud SQL Proxy.`, + }, + "maintenance_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Maintenance version.`, + DiffSuppressFunc: maintenanceVersionDiffSuppress, + }, + "available_maintenance_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `Available Maintenance versions.`, + }, + "database_version": { + Type: schema.TypeString, + Required: true, + Description: `The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.`, + }, + + "encryption_key_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "root_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: `Initial root password. Required for MS SQL Server.`, + }, + "ip_address": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "time_to_retire": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "first_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The first IPv4 address of any type assigned. 
This is to support accessing the first address in the list in a terraform output when the resource is configured with a count.`, + }, + + "public_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, + }, + + "private_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The name of the instance. If the name is left blank, Terraform will randomly generate one when the instance is first created. This is done because after a name is used, it cannot be reused for up to one week.`, + }, + + "master_instance_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The name of the instance that will act as the master in the replication setup. Note, this requires the master to have binary_log_enabled set, as well as existing backups.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The type of the instance. 
The valid values are:- 'SQL_INSTANCE_TYPE_UNSPECIFIED', 'CLOUD_SQL_INSTANCE', 'ON_PREMISES_INSTANCE' and 'READ_REPLICA_INSTANCE'.`, + }, + + "replica_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + // Returned from API on all replicas + Computed: true, + Sensitive: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the trusted CA's x509 certificate.`, + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the replica's x509 certificate.`, + }, + "client_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the replica's private key. The corresponding public key in encoded in the client_certificate.`, + }, + "connect_retry_interval": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `The number of seconds between connect retries. MySQL's default is 60 seconds.`, + }, + "dump_file_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Path to a SQL file in Google Cloud Storage from which replica instances are created. Format is gs://bucket/filename.`, + }, + "failover_target": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover replica. If the master instance fails, the replica instance will be promoted as the new master instance. 
Not supported for Postgres`, + }, + "master_heartbeat_period": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Time in ms between replication heartbeats.`, + }, + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Password for the replication connection.`, + }, + "ssl_cipher": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Permissible ciphers for use in SSL encryption.`, + }, + "username": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Username for replication connection.`, + }, + "verify_server_certificate": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `True if the master's common name value is checked during the SSL handshake.`, + }, + }, + }, + Description: `The configuration for replication.`, + }, + "server_ca_cert": { + Type: schema.TypeList, + Computed: true, + Sensitive: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert": { + Type: schema.TypeString, + Computed: true, + Description: `The CA Certificate used to connect to the SQL Instance via SSL.`, + }, + "common_name": { + Type: schema.TypeString, + Computed: true, + Description: `The CN valid for the CA Cert.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation time of the CA Cert.`, + }, + "expiration_time": { + Type: schema.TypeString, + Computed: true, + Description: `Expiration time of the CA Cert.`, + }, + "sha1_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `SHA Fingerprint of the CA Cert.`, + }, + }, + }, + }, + "service_account_email_address": { + Type: schema.TypeString, + Computed: true, + Description: 
`The service account email address assigned to the instance.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + "psc_service_attachment_link": { + Type: schema.TypeString, + Computed: true, + Description: `The link to service attachment of PSC instance.`, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + Description: `The dns name of the instance.`, + }, + "restore_backup_context": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backup_run_id": { + Type: schema.TypeInt, + Required: true, + Description: `The ID of the backup run to restore from.`, + }, + "instance_id": { + Type: schema.TypeString, + Optional: true, + Description: `The ID of the instance that the backup was taken from.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Description: `The full project ID of the source instance.`, + }, + }, + }, + }, + "clone": { + Type: schema.TypeList, + Optional: true, + Computed: false, + AtLeastOneOf: []string{"settings", "clone"}, + Description: `Configuration for creating a new instance as a clone of another instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_instance_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the instance from which the point in time should be restored.`, + }, + "point_in_time": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.TimestampDiffSuppress(time.RFC3339Nano), + Description: `The timestamp of the point in time that should be restored.`, + }, + "preferred_zone": { + Type: schema.TypeString, + Optional: true, + Description: `(Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. 
If no zone is specified, clone to the same zone as the source instance.`, + }, + "database_names": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `(SQL Server only, use with point_in_time) clone only the specified databases from the source instance. Clone all databases if empty.`, + }, + "allocated_ip_range": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +// Makes private_network ForceNew if it is changing from set to nil. The API returns an error +// if this change is attempted in-place. +func privateNetworkCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange("settings.0.ip_configuration.0.private_network") + + if old != "" && new == "" { + if err := d.ForceNew("settings.0.ip_configuration.0.private_network"); err != nil { + return err + } + } + + return nil +} + +// helper function to see if string within list contains a particular substring +func stringContainsSlice(arr []string, str string) bool { + for _, i := range arr { + if strings.Contains(str, i) { + return true + } + } + return false +} + +// Point in time recovery for MySQL database instances needs binary_log_enabled set to true and +// not point_in_time_recovery_enabled, which is confusing to users. This checks for +// point_in_time_recovery_enabled being set to a non-PostgreSQL and non-SQLServer database instances and suggests +// binary_log_enabled. 
func pitrSupportDbCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
	// Reject point_in_time_recovery_enabled unless the database_version string
	// contains one of the prefixes below (POSTGRES*/SQLSERVER*); for MySQL the
	// equivalent capability is binary_log_enabled.
	pitr := diff.Get("settings.0.backup_configuration.0.point_in_time_recovery_enabled").(bool)
	dbVersion := diff.Get("database_version").(string)
	dbVersionPitrValid := []string{"POSTGRES", "SQLSERVER"}
	if pitr && !stringContainsSlice(dbVersionPitrValid, dbVersion) {
		return fmt.Errorf("point_in_time_recovery_enabled is only available for the following %v. You may want to consider using binary_log_enabled instead and remove point_in_time_recovery_enabled (removing point_in_time_recovery_enabled and adding binary_log_enabled will enable pitr for MYSQL)", dbVersionPitrValid)
	}
	return nil
}

// resourceSqlDatabaseInstanceCreate creates a Cloud SQL instance. Flow:
//  1. reserve/validate the name and pre-check the private network (fail fast —
//     failed creations still reserve the name),
//  2. Instances.Insert, or Instances.Clone when a `clone` block is set,
//  3. wait for the operation, then delete the default 'root'@'%' user on
//     master instances,
//  4. patch settings that cannot be sent at creation (replica binary log),
//  5. for clones, push the user-defined settings via Instances.Update,
//  6. optionally restore from `restore_backup_context`.
func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return err
	}

	region, err := tpgresource.GetRegion(d, config)
	if err != nil {
		return err
	}

	// Generate a unique name if the user did not supply one.
	var name string
	if v, ok := d.GetOk("name"); ok {
		name = v.(string)
	} else {
		name = id.UniqueId()
	}

	if err := d.Set("name", name); err != nil {
		return fmt.Errorf("Error setting name: %s", err)
	}

	// SQL Instances that fail to create are expensive- see https://github.com/hashicorp/terraform-provider-google/issues/7154
	// We can fail fast to stop instance names from getting reserved.
	network := d.Get("settings.0.ip_configuration.0.private_network").(string)
	if network != "" {
		err = sqlDatabaseInstanceServiceNetworkPrecheck(d, config, userAgent, network)
		if err != nil {
			return err
		}
	}

	databaseVersion := d.Get("database_version").(string)

	instance := &sqladmin.DatabaseInstance{
		Name:                 name,
		Region:               region,
		DatabaseVersion:      databaseVersion,
		MasterInstanceName:   d.Get("master_instance_name").(string),
		ReplicaConfiguration: expandReplicaConfiguration(d.Get("replica_configuration").([]interface{})),
	}

	cloneContext, cloneSource := expandCloneContext(d.Get("clone").([]interface{}))

	s, ok := d.GetOk("settings")
	// desiredSettings is kept even when settings is unset so the post-clone
	// update below can apply the user's configuration.
	desiredSettings := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion)
	if ok {
		instance.Settings = desiredSettings
	}

	if _, ok := d.GetOk("maintenance_version"); ok {
		instance.MaintenanceVersion = d.Get("maintenance_version").(string)
	}

	if _, ok := d.GetOk("instance_type"); ok {
		instance.InstanceType = d.Get("instance_type").(string)
	}

	instance.RootPassword = d.Get("root_password").(string)

	// Modifying a replica during Create can cause problems if the master is
	// modified at the same time. Lock the master until we're done in order
	// to prevent that.
	if !sqlDatabaseIsMaster(d) {
		transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance.MasterInstanceName))
		defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance.MasterInstanceName))
	}

	if k, ok := d.GetOk("encryption_key_name"); ok {
		instance.DiskEncryptionConfiguration = &sqladmin.DiskEncryptionConfiguration{
			KmsKeyName: k.(string),
		}
	}

	var patchData *sqladmin.DatabaseInstance

	// BinaryLogging can be enabled on replica instances but only after creation.
	// Strip it from the insert payload and remember it for a post-create patch.
	if instance.MasterInstanceName != "" && instance.Settings != nil && instance.Settings.BackupConfiguration != nil && instance.Settings.BackupConfiguration.BinaryLogEnabled {
		settingsCopy := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion)
		bc := settingsCopy.BackupConfiguration
		patchData = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{BackupConfiguration: bc}}

		instance.Settings.BackupConfiguration.BinaryLogEnabled = false
	}

	var op *sqladmin.Operation
	err = transport_tpg.Retry(transport_tpg.RetryOptions{
		RetryFunc: func() (operr error) {
			if cloneContext != nil {
				cloneContext.DestinationInstanceName = name
				// NOTE(review): "clodeReq" looks like a typo for "cloneReq"
				// (cosmetic only — local variable name).
				clodeReq := sqladmin.InstancesCloneRequest{CloneContext: cloneContext}
				op, operr = config.NewSqlAdminClient(userAgent).Instances.Clone(project, cloneSource, &clodeReq).Do()
			} else {
				op, operr = config.NewSqlAdminClient(userAgent).Instances.Insert(project, instance).Do()
			}
			return operr
		},
		Timeout:              d.Timeout(schema.TimeoutCreate),
		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
	})
	if err != nil {
		return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err)
	}

	id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/instances/{{"{{"}}name{{"}}"}}")
	if err != nil {
		return fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	err = SqlAdminOperationWaitTime(config, op, project, "Create Instance", userAgent, d.Timeout(schema.TimeoutCreate))
	if err != nil {
		// Clear the ID so a failed-but-reserved instance is not tracked in state.
		d.SetId("")
		return err
	}

	// If a default root user was created with a wildcard ('%') hostname, delete it. Note it
	// appears to only be created for certain types of databases, like MySQL.
	// Users in a replica instance are inherited from the master instance and should be left alone.
	// This deletion is done immediately after the instance is created, in order to minimize the
	// risk of it being left on the instance, which would present a security concern.
	if sqlDatabaseIsMaster(d) {
		var users *sqladmin.UsersListResponse
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() error {
				users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance.Name).Do()
				return err
			},
			Timeout:              d.Timeout(schema.TimeoutRead),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			return fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err)
		}
		for _, u := range users.Items {
			if u.Name == "root" && u.Host == "%" {
				err = transport_tpg.Retry(transport_tpg.RetryOptions{
					RetryFunc: func() error {
						op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance.Name).Host(u.Host).Name(u.Name).Do()
						if err == nil {
							err = SqlAdminOperationWaitTime(config, op, project, "Delete default root User", userAgent, d.Timeout(schema.TimeoutCreate))
						}
						return err
					},
				})
				if err != nil {
					// NOTE(review): "u" in this message looks like a truncated
					// "user" — confirm against upstream before changing.
					return fmt.Errorf("Error, failed to delete default 'root'@'*' u, but the database was created successfully: %s", err)
				}
			}
		}
	}

	// patch any fields that need to be sent postcreation
	if patchData != nil {
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() (rerr error) {
				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, instance.Name, patchData).Do()
				return rerr
			},
			Timeout:              d.Timeout(schema.TimeoutUpdate),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err)
		}
		err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
		if err != nil {
			return err
		}
	}

	err = resourceSqlDatabaseInstanceRead(d, meta)
	if err != nil {
		return err
	}

	// Refresh settings from read as they may have defaulted from the API
	s = d.Get("settings")
	// If we've created an instance as a clone, we need to update it to set any user defined settings
	if len(s.([]interface{})) != 0 && cloneContext != nil && desiredSettings != nil {
		instanceUpdate := &sqladmin.DatabaseInstance{
			Settings: desiredSettings,
		}
		// Use the settings version the API returned, otherwise the update is
		// rejected as stale.
		_settings := s.([]interface{})[0].(map[string]interface{})
		instanceUpdate.Settings.SettingsVersion = int64(_settings["version"].(int))
		var op *sqladmin.Operation
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() (rerr error) {
				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, name, instanceUpdate).Do()
				return rerr
			},
			Timeout:              d.Timeout(schema.TimeoutUpdate),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err)
		}

		err = SqlAdminOperationWaitTime(config, op, project, "Update Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
		if err != nil {
			return err
		}

		// Refresh the state of the instance after updating the settings
		err = resourceSqlDatabaseInstanceRead(d, meta)
		if err != nil {
			return err
		}
	}

	// Perform a backup restore if the backup context exists
	if r, ok := d.GetOk("restore_backup_context"); ok {
		err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, name, r)
		if err != nil {
			return err
		}
	}

	return nil
}

// Available fields for settings vary between database versions.
func expandSqlDatabaseInstanceSettings(configured []interface{}, databaseVersion string) *sqladmin.Settings {
	// Empty/nil settings block means "let the API default everything".
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_settings := configured[0].(map[string]interface{})
	settings := &sqladmin.Settings{
		// Version is unset in Create but is set during update
		SettingsVersion:         int64(_settings["version"].(int)),
		DataCacheConfig:         expandDataCacheConfig(_settings["data_cache_config"].([]interface{})),
		Tier:                    _settings["tier"].(string),
		Edition:                 _settings["edition"].(string),
		AdvancedMachineFeatures: expandSqlServerAdvancedMachineFeatures(_settings["advanced_machine_features"].([]interface{})),
		// Booleans that default to false must be force-sent or the API treats
		// them as unset.
		ForceSendFields:           []string{"StorageAutoResize", "EnableGoogleMlIntegration", "EnableDataplexIntegration"},
		ActivationPolicy:          _settings["activation_policy"].(string),
		ActiveDirectoryConfig:     expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})),
		DenyMaintenancePeriods:    expandDenyMaintenancePeriod(_settings["deny_maintenance_period"].([]interface{})),
		SqlServerAuditConfig:      expandSqlServerAuditConfig(_settings["sql_server_audit_config"].([]interface{})),
		TimeZone:                  _settings["time_zone"].(string),
		AvailabilityType:          _settings["availability_type"].(string),
		ConnectorEnforcement:      _settings["connector_enforcement"].(string),
		Collation:                 _settings["collation"].(string),
		DataDiskSizeGb:            int64(_settings["disk_size"].(int)),
		DataDiskType:              _settings["disk_type"].(string),
		PricingPlan:               _settings["pricing_plan"].(string),
		DeletionProtectionEnabled: _settings["deletion_protection_enabled"].(bool),
		EnableGoogleMlIntegration: _settings["enable_google_ml_integration"].(bool),
		EnableDataplexIntegration: _settings["enable_dataplex_integration"].(bool),
		UserLabels:                tpgresource.ConvertStringMap(_settings["user_labels"].(map[string]interface{})),
		BackupConfiguration:       expandBackupConfiguration(_settings["backup_configuration"].([]interface{})),
		DatabaseFlags:             expandDatabaseFlags(_settings["database_flags"].(*schema.Set).List()),
		IpConfiguration:           expandIpConfiguration(_settings["ip_configuration"].([]interface{}), databaseVersion),
		LocationPreference:        expandLocationPreference(_settings["location_preference"].([]interface{})),
		MaintenanceWindow:         expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})),
		InsightsConfig:            expandInsightsConfig(_settings["insights_config"].([]interface{})),
		PasswordValidationPolicy:  expandPasswordValidationPolicy(_settings["password_validation_policy"].([]interface{})),
	}

	resize := _settings["disk_autoresize"].(bool)
	settings.StorageAutoResize = &resize
	settings.StorageAutoResizeLimit = int64(_settings["disk_autoresize_limit"].(int))

	return settings
}

// expandReplicaConfiguration converts the single-element replica_configuration
// block into the API representation; returns nil when the block is absent.
func expandReplicaConfiguration(configured []interface{}) *sqladmin.ReplicaConfiguration {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_replicaConfiguration := configured[0].(map[string]interface{})
	return &sqladmin.ReplicaConfiguration{
		FailoverTarget: _replicaConfiguration["failover_target"].(bool),

		// MysqlReplicaConfiguration has been flattened in the TF schema, so
		// we'll keep it flat here instead of another expand method.
		MysqlReplicaConfiguration: &sqladmin.MySqlReplicaConfiguration{
			CaCertificate:           _replicaConfiguration["ca_certificate"].(string),
			ClientCertificate:       _replicaConfiguration["client_certificate"].(string),
			ClientKey:               _replicaConfiguration["client_key"].(string),
			ConnectRetryInterval:    int64(_replicaConfiguration["connect_retry_interval"].(int)),
			DumpFilePath:            _replicaConfiguration["dump_file_path"].(string),
			MasterHeartbeatPeriod:   int64(_replicaConfiguration["master_heartbeat_period"].(int)),
			Password:                _replicaConfiguration["password"].(string),
			SslCipher:               _replicaConfiguration["ssl_cipher"].(string),
			Username:                _replicaConfiguration["username"].(string),
			VerifyServerCertificate: _replicaConfiguration["verify_server_certificate"].(bool),
		},
	}
}

// expandCloneContext returns the CloneContext plus the source instance name
// (the second return value feeds Instances.Clone in create).
func expandCloneContext(configured []interface{}) (*sqladmin.CloneContext, string) {
	if len(configured) == 0 || configured[0] == nil {
		return nil, ""
	}

	_cloneConfiguration := configured[0].(map[string]interface{})

	databaseNames := []string{}
	rawDatabaseNames := _cloneConfiguration["database_names"].([]interface{})
	for _, db := range rawDatabaseNames {
		databaseNames = append(databaseNames, db.(string))
	}

	return &sqladmin.CloneContext{
		PointInTime:      _cloneConfiguration["point_in_time"].(string),
		PreferredZone:    _cloneConfiguration["preferred_zone"].(string),
		DatabaseNames:    databaseNames,
		AllocatedIpRange: _cloneConfiguration["allocated_ip_range"].(string),
	}, _cloneConfiguration["source_instance_name"].(string)
}

// expandMaintenanceWindow converts the maintenance_window block; Hour is
// force-sent so that an explicit hour 0 (midnight) is not dropped.
func expandMaintenanceWindow(configured []interface{}) *sqladmin.MaintenanceWindow {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	window := configured[0].(map[string]interface{})
	return &sqladmin.MaintenanceWindow{
		Day:             int64(window["day"].(int)),
		Hour:            int64(window["hour"].(int)),
		UpdateTrack:     window["update_track"].(string),
		ForceSendFields: []string{"Hour"},
	}
}

// expandLocationPreference converts the location_preference block.
func expandLocationPreference(configured []interface{}) *sqladmin.LocationPreference {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_locationPreference := configured[0].(map[string]interface{})
	return &sqladmin.LocationPreference{
		FollowGaeApplication: _locationPreference["follow_gae_application"].(string),
		Zone:                 _locationPreference["zone"].(string),
		SecondaryZone:        _locationPreference["secondary_zone"].(string),
	}
}

// expandIpConfiguration converts the ip_configuration block. databaseVersion
// is needed because EnablePrivatePathForGoogleCloudServices must not be sent
// for SQL Server instances.
func expandIpConfiguration(configured []interface{}, databaseVersion string) *sqladmin.IpConfiguration {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_ipConfiguration := configured[0].(map[string]interface{})

	forceSendFields := []string{"Ipv4Enabled"}
	nullFields := []string{"RequireSsl"}

	if !strings.HasPrefix(databaseVersion, "SQLSERVER") {
		forceSendFields = append(forceSendFields, "EnablePrivatePathForGoogleCloudServices")
	}

	return &sqladmin.IpConfiguration{
		Ipv4Enabled:                             _ipConfiguration["ipv4_enabled"].(bool),
		PrivateNetwork:                          _ipConfiguration["private_network"].(string),
		AllocatedIpRange:                        _ipConfiguration["allocated_ip_range"].(string),
		AuthorizedNetworks:                      expandAuthorizedNetworks(_ipConfiguration["authorized_networks"].(*schema.Set).List()),
		EnablePrivatePathForGoogleCloudServices: _ipConfiguration["enable_private_path_for_google_cloud_services"].(bool),
		ForceSendFields:                         forceSendFields,
		NullFields:                              nullFields,
		PscConfig:                               expandPscConfig(_ipConfiguration["psc_config"].(*schema.Set).List()),
		SslMode:                                 _ipConfiguration["ssl_mode"].(string),
		ServerCaMode:                            _ipConfiguration["server_ca_mode"].(string),
	}
}

// expandPscConfig converts the psc_config set; only the first element is used
// (the loop returns on its first iteration) and nil is returned when empty.
func expandPscConfig(configured []interface{}) *sqladmin.PscConfig {
	for _, _pscConfig := range configured {
		_entry := _pscConfig.(map[string]interface{})
		return &sqladmin.PscConfig{
			PscEnabled:              _entry["psc_enabled"].(bool),
			AllowedConsumerProjects: tpgresource.ConvertStringArr(_entry["allowed_consumer_projects"].(*schema.Set).List()),
		}
	}

	return nil
}

// expandAuthorizedNetworks converts authorized_networks entries to AclEntry.
func expandAuthorizedNetworks(configured []interface{}) []*sqladmin.AclEntry {
	an := make([]*sqladmin.AclEntry, 0, len(configured))
	for _, _acl := range configured {
		_entry := _acl.(map[string]interface{})
		an = append(an, &sqladmin.AclEntry{
			ExpirationTime: _entry["expiration_time"].(string),
			Name:           _entry["name"].(string),
			Value:          _entry["value"].(string),
		})
	}

	return an
}

// expandDatabaseFlags converts database_flags entries, skipping nil elements.
func expandDatabaseFlags(configured []interface{}) []*sqladmin.DatabaseFlags {
	databaseFlags := make([]*sqladmin.DatabaseFlags, 0, len(configured))
	for _, _flag := range configured {
		if _flag == nil {
			continue
		}
		_entry := _flag.(map[string]interface{})

		databaseFlags = append(databaseFlags, &sqladmin.DatabaseFlags{
			Name:  _entry["name"].(string),
			Value: _entry["value"].(string),
		})
	}
	return databaseFlags
}

// expandDataCacheConfig converts the data_cache_config block.
func expandDataCacheConfig(configured interface{}) *sqladmin.DataCacheConfig {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}
	config := l[0].(map[string]interface{})
	return &sqladmin.DataCacheConfig{
		DataCacheEnabled: config["data_cache_enabled"].(bool),
	}
}

// expandBackupConfiguration converts the backup_configuration block; the
// boolean fields are force-sent so explicit "false" reaches the API.
func expandBackupConfiguration(configured []interface{}) *sqladmin.BackupConfiguration {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_backupConfiguration := configured[0].(map[string]interface{})
	return &sqladmin.BackupConfiguration{
		BinaryLogEnabled:            _backupConfiguration["binary_log_enabled"].(bool),
		BackupRetentionSettings:     expandBackupRetentionSettings(_backupConfiguration["backup_retention_settings"]),
		Enabled:                     _backupConfiguration["enabled"].(bool),
		StartTime:                   _backupConfiguration["start_time"].(string),
		Location:                    _backupConfiguration["location"].(string),
		TransactionLogRetentionDays: int64(_backupConfiguration["transaction_log_retention_days"].(int)),
		PointInTimeRecoveryEnabled:  _backupConfiguration["point_in_time_recovery_enabled"].(bool),
		ForceSendFields:             []string{"BinaryLogEnabled", "Enabled", "PointInTimeRecoveryEnabled"},
	}
}

// expandBackupRetentionSettings converts the backup_retention_settings block.
func expandBackupRetentionSettings(configured interface{}) *sqladmin.BackupRetentionSettings {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}
	config := l[0].(map[string]interface{})
	return &sqladmin.BackupRetentionSettings{
		RetainedBackups: int64(config["retained_backups"].(int)),
		RetentionUnit:   config["retention_unit"].(string),
	}
}

// expandActiveDirectoryConfig converts the active_directory_config block.
func expandActiveDirectoryConfig(configured interface{}) *sqladmin.SqlActiveDirectoryConfig {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}

	config := l[0].(map[string]interface{})
	return &sqladmin.SqlActiveDirectoryConfig{
		Domain: config["domain"].(string),
	}
}

// expandDenyMaintenancePeriod converts deny_maintenance_period entries,
// skipping nil elements.
func expandDenyMaintenancePeriod(configured []interface{}) []*sqladmin.DenyMaintenancePeriod {
	denyMaintenancePeriod := make([]*sqladmin.DenyMaintenancePeriod, 0, len(configured))

	for _, _flag := range configured {
		if _flag == nil {
			continue
		}
		_entry := _flag.(map[string]interface{})

		denyMaintenancePeriod = append(denyMaintenancePeriod, &sqladmin.DenyMaintenancePeriod{
			EndDate:   _entry["end_date"].(string),
			StartDate: _entry["start_date"].(string),
			Time:      _entry["time"].(string),
		})
	}
	return denyMaintenancePeriod

}

// expandSqlServerAdvancedMachineFeatures converts advanced_machine_features.
func expandSqlServerAdvancedMachineFeatures(configured interface{}) *sqladmin.AdvancedMachineFeatures {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}

	config := l[0].(map[string]interface{})
	return &sqladmin.AdvancedMachineFeatures{
		ThreadsPerCore: int64(config["threads_per_core"].(int)),
	}
}

// expandSqlServerAuditConfig converts the sql_server_audit_config block.
func expandSqlServerAuditConfig(configured interface{}) *sqladmin.SqlServerAuditConfig {
	l := configured.([]interface{})
	if len(l) == 0 {
		return nil
	}

	config := l[0].(map[string]interface{})
	return &sqladmin.SqlServerAuditConfig{
		Bucket:            config["bucket"].(string),
		RetentionInterval: config["retention_interval"].(string),
		UploadInterval:    config["upload_interval"].(string),
	}
}

// expandInsightsConfig converts the insights_config (Query Insights) block.
func expandInsightsConfig(configured []interface{}) *sqladmin.InsightsConfig {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_insightsConfig := configured[0].(map[string]interface{})
	return &sqladmin.InsightsConfig{
		QueryInsightsEnabled:  _insightsConfig["query_insights_enabled"].(bool),
		QueryStringLength:     int64(_insightsConfig["query_string_length"].(int)),
		RecordApplicationTags: _insightsConfig["record_application_tags"].(bool),
		RecordClientAddress:   _insightsConfig["record_client_address"].(bool),
		QueryPlansPerMinute:   int64(_insightsConfig["query_plans_per_minute"].(int)),
	}
}

// expandPasswordValidationPolicy converts the password_validation_policy block.
func expandPasswordValidationPolicy(configured []interface{}) *sqladmin.PasswordValidationPolicy {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	_passwordValidationPolicy := configured[0].(map[string]interface{})
	return &sqladmin.PasswordValidationPolicy{
		MinLength:                 int64(_passwordValidationPolicy["min_length"].(int)),
		Complexity:                _passwordValidationPolicy["complexity"].(string),
		ReuseInterval:             int64(_passwordValidationPolicy["reuse_interval"].(int)),
		DisallowUsernameSubstring: _passwordValidationPolicy["disallow_username_substring"].(bool),
		PasswordChangeInterval:    _passwordValidationPolicy["password_change_interval"].(string),
		EnablePasswordPolicy:      _passwordValidationPolicy["enable_password_policy"].(bool),
	}
}

// resourceSqlDatabaseInstanceRead fetches the instance via Instances.Get
// (retrying while another SQL operation is in progress) and writes every
// attribute back to state. Set failures on computed sub-blocks (settings,
// replica configuration, IP addresses, server CA cert) are logged as warnings
// rather than returned as errors.
func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return err
	}

	var instance *sqladmin.DatabaseInstance
	err = transport_tpg.Retry(transport_tpg.RetryOptions{
		RetryFunc: func() (rerr error) {
			instance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, d.Get("name").(string)).Do()
			return rerr
		},
		Timeout:              d.Timeout(schema.TimeoutRead),
		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
	})
	if err != nil {
		// Removes the resource from state on 404.
		return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string)))
	}

	if err := d.Set("name", instance.Name); err != nil {
		return fmt.Errorf("Error setting name: %s", err)
	}
	if err := d.Set("region", instance.Region); err != nil {
		return fmt.Errorf("Error setting region: %s", err)
	}
	if err := d.Set("database_version", instance.DatabaseVersion); err != nil {
		return fmt.Errorf("Error setting database_version: %s", err)
	}
	if err := d.Set("connection_name", instance.ConnectionName); err != nil {
		return fmt.Errorf("Error setting connection_name: %s", err)
	}
	if err := d.Set("maintenance_version", instance.MaintenanceVersion); err != nil {
		return fmt.Errorf("Error setting maintenance_version: %s", err)
	}
	if err := d.Set("available_maintenance_versions", instance.AvailableMaintenanceVersions); err != nil {
		return fmt.Errorf("Error setting available_maintenance_version: %s", err)
	}
	if err := d.Set("service_account_email_address", instance.ServiceAccountEmailAddress); err != nil {
		return fmt.Errorf("Error setting service_account_email_address: %s", err)
	}
	if err := d.Set("instance_type", instance.InstanceType); err != nil {
		return fmt.Errorf("Error setting instance_type: %s", err)
	}
	if err := d.Set("settings", flattenSettings(instance.Settings, d)); err != nil {
		log.Printf("[WARN] Failed to set SQL Database Instance Settings")
	}

	if instance.DiskEncryptionConfiguration != nil {
		if err := d.Set("encryption_key_name", instance.DiskEncryptionConfiguration.KmsKeyName); err != nil {
			return fmt.Errorf("Error setting encryption_key_name: %s", err)
		}
	}

	if err := d.Set("replica_configuration", flattenReplicaConfiguration(instance.ReplicaConfiguration, d)); err != nil {
		log.Printf("[WARN] Failed to set SQL Database Instance Replica Configuration")
	}
	ipAddresses := flattenIpAddresses(instance.IpAddresses)
	if err := d.Set("ip_address", ipAddresses); err != nil {
		log.Printf("[WARN] Failed to set SQL Database Instance IP Addresses")
	}

	if len(ipAddresses) > 0 {
		if err := d.Set("first_ip_address", ipAddresses[0]["ip_address"]); err != nil {
			return fmt.Errorf("Error setting first_ip_address: %s", err)
		}
	}

	// Surface the first PRIMARY (public) and first PRIVATE address as
	// dedicated convenience attributes.
	publicIpAddress := ""
	privateIpAddress := ""
	for _, ip := range instance.IpAddresses {
		if publicIpAddress == "" && ip.Type == "PRIMARY" {
			publicIpAddress = ip.IpAddress
		}

		if privateIpAddress == "" && ip.Type == "PRIVATE" {
			privateIpAddress = ip.IpAddress
		}
	}

	if err := d.Set("public_ip_address", publicIpAddress); err != nil {
		return fmt.Errorf("Error setting public_ip_address: %s", err)
	}
	if err := d.Set("private_ip_address", privateIpAddress); err != nil {
		return fmt.Errorf("Error setting private_ip_address: %s", err)
	}

	if err := d.Set("server_ca_cert", flattenServerCaCerts([]*sqladmin.SslCert{instance.ServerCaCert})); err != nil {
		log.Printf("[WARN] Failed to set SQL Database CA Certificate")
	}

	// The API may return "project:name"; state stores just the name.
	if err := d.Set("master_instance_name", strings.TrimPrefix(instance.MasterInstanceName, project+":")); err != nil {
		return fmt.Errorf("Error setting master_instance_name: %s", err)
	}
	if err := d.Set("project", project); err != nil {
		return fmt.Errorf("Error setting project: %s", err)
	}
	if err := d.Set("self_link", instance.SelfLink); err != nil {
		return fmt.Errorf("Error setting self_link: %s", err)
	}
	if err := d.Set("psc_service_attachment_link", instance.PscServiceAttachmentLink); err != nil {
		return fmt.Errorf("Error setting psc_service_attachment_link: %s", err)
	}
	if err := d.Set("dns_name", instance.DnsName); err != nil {
		return fmt.Errorf("Error setting dns_name: %s", err)
	}
	d.SetId(instance.Name)

	return nil
}

// resourceSqlDatabaseInstanceUpdate applies changes in a fixed order, because
// several fields can only be patched atomically, one at a time:
// activation_policy (to ALWAYS) -> database_version -> root_password ->
// maintenance_version -> replica promotion -> edition/tier/data-cache ->
// the remaining settings via Instances.Update -> optional backup restore.
// Each atomic patch is followed by a wait and a state refresh.
func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return err
	}
	var maintenance_version string
	if v, ok := d.GetOk("maintenance_version"); ok {
		maintenance_version = v.(string)
	}

	promoteReadReplicaRequired := false
	if d.HasChange("instance_type") {
		oldInstanceType, newInstanceType := d.GetChange("instance_type")

		if isReplicaPromoteRequested(nil, oldInstanceType, newInstanceType, nil) {
			err = checkPromoteConfigurations(d)
			if err != nil {
				return err
			}

			promoteReadReplicaRequired = true
		}
	}

	desiredSetting := d.Get("settings")
	var op *sqladmin.Operation
	// NOTE(review): instance is only assigned inside the HasChange branches
	// below. If none of them fire before the promote branch, the promote error
	// message dereferences a nil instance (instance.Name) — confirm and fix
	// upstream.
	var instance *sqladmin.DatabaseInstance

	databaseVersion := d.Get("database_version").(string)

	// Check if the activation policy is being updated. If it is being changed to ALWAYS this should be done first.
	if d.HasChange("settings.0.activation_policy") && d.Get("settings.0.activation_policy").(string) == "ALWAYS" {
		instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{ActivationPolicy: "ALWAYS"}}
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() (rerr error) {
				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do()
				return rerr
			},
			Timeout:              d.Timeout(schema.TimeoutUpdate),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err)
		}
		err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
		if err != nil {
			return err
		}
		err = resourceSqlDatabaseInstanceRead(d, meta)
		if err != nil {
			return err
		}
	}

	// Check if the database version is being updated, because patching database version is an atomic operation and can not be
	// performed with other fields, we first patch database version before updating the rest of the fields.
	if d.HasChange("database_version") {
		instance = &sqladmin.DatabaseInstance{DatabaseVersion: databaseVersion}
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() (rerr error) {
				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do()
				return rerr
			},
			Timeout:              d.Timeout(schema.TimeoutUpdate),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err)
		}
		err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
		if err != nil {
			return err
		}
		err = resourceSqlDatabaseInstanceRead(d, meta)
		if err != nil {
			return err
		}
	}

	// Check if the root_password is being updated, because updating root_password is an atomic operation and can not be
	// performed with other fields, we first update root password before updating the rest of the fields.
	if d.HasChange("root_password") {
		oldPwd, newPwd := d.GetChange("root_password")
		password := newPwd.(string)
		dv := d.Get("database_version").(string)
		// The built-in admin account name/host differs per engine.
		name := ""
		host := ""
		if strings.Contains(dv, "MYSQL") {
			name = "root"
			host = "%"
		} else if strings.Contains(dv, "POSTGRES") {
			name = "postgres"
		} else if strings.Contains(dv, "SQLSERVER") {
			name = "sqlserver"
			if len(password) == 0 {
				// Roll the state value back so the diff reappears next plan.
				if err := d.Set("root_password", oldPwd.(string)); err != nil {
					return fmt.Errorf("Error re-setting root_password: %s", err)
				}
				return fmt.Errorf("Error, root password cannot be empty for SQL Server instance.")
			}
		} else {
			if err := d.Set("root_password", oldPwd.(string)); err != nil {
				return fmt.Errorf("Error re-setting root_password: %s", err)
			}
			return fmt.Errorf("Error, invalid database version")
		}
		instance := d.Get("name").(string)

		user := &sqladmin.User{
			Name:     name,
			Instance: instance,
			Password: password,
		}

		transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance))
		defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance))
		var op *sqladmin.Operation
		updateFunc := func() error {
			op, err = config.NewSqlAdminClient(userAgent).Users.Update(project, instance, user).Host(host).Name(name).Do()
			return err
		}
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: updateFunc,
			Timeout:   d.Timeout(schema.TimeoutUpdate),
		})

		if err != nil {
			if err := d.Set("root_password", oldPwd.(string)); err != nil {
				return fmt.Errorf("Error re-setting root_password: %s", err)
			}
			return fmt.Errorf("Error, failed to update root_password : %s", err)
		}

		err = SqlAdminOperationWaitTime(config, op, project, "Insert User", userAgent, d.Timeout(schema.TimeoutUpdate))

		if err != nil {
			if err := d.Set("root_password", oldPwd.(string)); err != nil {
				return fmt.Errorf("Error re-setting root_password: %s", err)
			}
			return fmt.Errorf("Error, failed to update root_password : %s", err)
		}
	}

	// Check if the maintenance version is being updated, because patching maintenance version is an atomic operation and can not be
	// performed with other fields, we first patch maintenance version before updating the rest of the fields.
	if d.HasChange("maintenance_version") {
		instance = &sqladmin.DatabaseInstance{MaintenanceVersion: maintenance_version}
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() (rerr error) {
				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do()
				return rerr
			},
			Timeout:              d.Timeout(schema.TimeoutUpdate),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err)
		}
		err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
		if err != nil {
			return err
		}
		err = resourceSqlDatabaseInstanceRead(d, meta)
		if err != nil {
			return err
		}
	}

	if promoteReadReplicaRequired {
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() (rerr error) {
				op, rerr = config.NewSqlAdminClient(userAgent).Instances.PromoteReplica(project, d.Get("name").(string)).Do()
				return rerr
			},
			Timeout:              d.Timeout(schema.TimeoutUpdate),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			// NOTE(review): instance may be nil here (see declaration above),
			// which would panic while formatting this error.
			return fmt.Errorf("Error, failed to promote read replica instance as primary stand-alone %s: %s", instance.Name, err)
		}
		err = SqlAdminOperationWaitTime(config, op, project, "Promote Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
		if err != nil {
			return err
		}
		err = resourceSqlDatabaseInstanceRead(d, meta)
		if err != nil {
			return err
		}
	}

	// Check if the edition is being updated, because patching edition is an atomic operation and can not be
	// performed with other fields, we first patch edition, tier and data cache config before updating the rest of the fields.
	if d.HasChange("settings.0.edition") {
		edition := d.Get("settings.0.edition").(string)
		tier := d.Get("settings.0.tier").(string)
		dataCacheConfig := expandDataCacheConfig(d.Get("settings.0.data_cache_config").([]interface{}))
		instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{Edition: edition, Tier: tier, DataCacheConfig: dataCacheConfig}}
		err = transport_tpg.Retry(transport_tpg.RetryOptions{
			RetryFunc: func() (rerr error) {
				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do()
				return rerr
			},
			Timeout:              d.Timeout(schema.TimeoutUpdate),
			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
		})
		if err != nil {
			return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err)
		}
		err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
		if err != nil {
			return err
		}
		err = resourceSqlDatabaseInstanceRead(d, meta)
		if err != nil {
			return err
		}
	}

	s := d.Get("settings")
	instance = &sqladmin.DatabaseInstance{
		Settings: expandSqlDatabaseInstanceSettings(desiredSetting.([]interface{}), databaseVersion),
	}
	_settings := s.([]interface{})[0].(map[string]interface{})
	// Instance.Patch operation on completion updates the settings proto version by +8. As terraform does not know this it tries
	// to make an update call with the proto version before patch and fails. To resolve this issue we update the setting version
	// before making the update call.
	instance.Settings.SettingsVersion = int64(_settings["version"].(int))
	// Collation cannot be included in the update request
	instance.Settings.Collation = ""

	// Lock on the master_instance_name just in case updating any replica
	// settings causes operations on the master.
	if v, ok := d.GetOk("master_instance_name"); ok {
		transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string)))
		defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string)))
	}

	if _, ok := d.GetOk("instance_type"); ok {
		instance.InstanceType = d.Get("instance_type").(string)
	}

	// Database Version is required for all calls with Google ML integration enabled or it will be rejected by the API.
	if d.Get("settings.0.enable_google_ml_integration").(bool) {
		instance.DatabaseVersion = databaseVersion
	}

	err = transport_tpg.Retry(transport_tpg.RetryOptions{
		RetryFunc: func() (rerr error) {
			op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, d.Get("name").(string), instance).Do()
			return rerr
		},
		Timeout:              d.Timeout(schema.TimeoutUpdate),
		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
	})
	if err != nil {
		return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err)
	}

	err = SqlAdminOperationWaitTime(config, op, project, "Update Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
	if err != nil {
		return err
	}

	// Perform a backup restore if the backup context exists and has changed
	if r, ok := d.GetOk("restore_backup_context"); ok {
		if d.HasChange("restore_backup_context") {
			err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, d.Get("name").(string), r)
			if err != nil {
				return err
			}
		}
	}

	return resourceSqlDatabaseInstanceRead(d, meta)
}

func maintenanceVersionDiffSuppress(_, old, new string, _ *schema.ResourceData) bool {
	// Ignore the database version part and
only compare the last part of the maintenance version which represents the release date of the version. + if len(old) > 14 && len(new) > 14 && old[len(old)-14:] >= new[len(new)-14:] { + log.Printf("[DEBUG] Maintenance version in configuration [%s] is older than current maintenance version [%s] on instance. Suppressing diff", new, old) + return true + } else { + return false + } +} + +func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Check if deletion protection is enabled. + + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("Error, failed to delete instance because deletion_protection is set to true. Set it to false to proceed with instance deletion") + } + + // Lock on the master_instance_name just in case deleting a replica causes + // operations on the master. 
+ if v, ok := d.GetOk("master_instance_name"); ok { + transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string))) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string))) + } + + var op *sqladmin.Operation + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Delete(project, d.Get("name").(string)).Do() + if rerr != nil { + return rerr + } + err = SqlAdminOperationWaitTime(config, op, project, "Delete Instance", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError, IsSqlInternalError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) + } + return nil +} + +func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/instances/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSettings(settings *sqladmin.Settings, d *schema.ResourceData) []map[string]interface{} { + data := map[string]interface{}{ + "version": settings.SettingsVersion, + "tier": settings.Tier, + "edition": flattenEdition(settings.Edition), + "activation_policy": 
settings.ActivationPolicy, + "availability_type": settings.AvailabilityType, + "collation": settings.Collation, + "connector_enforcement": settings.ConnectorEnforcement, + "disk_type": settings.DataDiskType, + "disk_size": settings.DataDiskSizeGb, + "pricing_plan": settings.PricingPlan, + "user_labels": settings.UserLabels, + "password_validation_policy": settings.PasswordValidationPolicy, + "time_zone": settings.TimeZone, + "deletion_protection_enabled": settings.DeletionProtectionEnabled, + } + + if settings.ActiveDirectoryConfig != nil { + data["active_directory_config"] = flattenActiveDirectoryConfig(settings.ActiveDirectoryConfig) + } + + if settings.DenyMaintenancePeriods != nil { + data["deny_maintenance_period"] = flattenDenyMaintenancePeriod(settings.DenyMaintenancePeriods) + } + + if settings.SqlServerAuditConfig != nil { + data["sql_server_audit_config"] = flattenSqlServerAuditConfig(settings.SqlServerAuditConfig) + } + + if settings.BackupConfiguration != nil { + data["backup_configuration"] = flattenBackupConfiguration(settings.BackupConfiguration) + } + + if settings.DatabaseFlags != nil { + data["database_flags"] = flattenDatabaseFlags(settings.DatabaseFlags) + } + + if settings.IpConfiguration != nil { + data["ip_configuration"] = flattenIpConfiguration(settings.IpConfiguration, d) + } + + if settings.LocationPreference != nil { + data["location_preference"] = flattenLocationPreference(settings.LocationPreference) + } + + if settings.MaintenanceWindow != nil { + data["maintenance_window"] = flattenMaintenanceWindow(settings.MaintenanceWindow) + } + + if settings.InsightsConfig != nil { + data["insights_config"] = flattenInsightsConfig(settings.InsightsConfig) + } + + data["disk_autoresize"] = settings.StorageAutoResize + data["disk_autoresize_limit"] = settings.StorageAutoResizeLimit + + data["enable_google_ml_integration"] = settings.EnableGoogleMlIntegration + data["enable_dataplex_integration"] = settings.EnableDataplexIntegration + + if 
settings.UserLabels != nil { + data["user_labels"] = settings.UserLabels + } + + if settings.PasswordValidationPolicy != nil { + data["password_validation_policy"] = flattenPasswordValidationPolicy(settings.PasswordValidationPolicy) + } + + if settings.DataCacheConfig != nil { + data["data_cache_config"] = flattenDataCacheConfig(settings.DataCacheConfig) + } + + if settings.AdvancedMachineFeatures != nil { + data["advanced_machine_features"] = flattenSqlServerAdvancedMachineFeatures(settings.AdvancedMachineFeatures) + } + + return []map[string]interface{}{data} +} + +func flattenDataCacheConfig(d *sqladmin.DataCacheConfig) []map[string]interface{} { + if d == nil { + return nil + } + return []map[string]interface{}{ + { + "data_cache_enabled": d.DataCacheEnabled, + }, + } +} + +func flattenBackupConfiguration(backupConfiguration *sqladmin.BackupConfiguration) []map[string]interface{} { + data := map[string]interface{}{ + "binary_log_enabled": backupConfiguration.BinaryLogEnabled, + "enabled": backupConfiguration.Enabled, + "start_time": backupConfiguration.StartTime, + "location": backupConfiguration.Location, + "point_in_time_recovery_enabled": backupConfiguration.PointInTimeRecoveryEnabled, + "backup_retention_settings": flattenBackupRetentionSettings(backupConfiguration.BackupRetentionSettings), + "transaction_log_retention_days": backupConfiguration.TransactionLogRetentionDays, + } + + return []map[string]interface{}{data} +} + +func flattenBackupRetentionSettings(b *sqladmin.BackupRetentionSettings) []map[string]interface{} { + if b == nil { + return nil + } + return []map[string]interface{}{ + { + "retained_backups": b.RetainedBackups, + "retention_unit": b.RetentionUnit, + }, + } +} + +func flattenActiveDirectoryConfig(sqlActiveDirectoryConfig *sqladmin.SqlActiveDirectoryConfig) []map[string]interface{} { + if sqlActiveDirectoryConfig == nil { + return nil + } + return []map[string]interface{}{ + { + "domain": sqlActiveDirectoryConfig.Domain, + }, + } +} + 
+func flattenDenyMaintenancePeriod(denyMaintenancePeriod []*sqladmin.DenyMaintenancePeriod) []map[string]interface{} { + flags := make([]map[string]interface{}, 0, len(denyMaintenancePeriod)) + + for _, flag := range denyMaintenancePeriod { + data := map[string]interface{}{ + "end_date": flag.EndDate, + "start_date": flag.StartDate, + "time": flag.Time, + } + + flags = append(flags, data) + } + + return flags +} + +func flattenSqlServerAdvancedMachineFeatures(advancedMachineFeatures *sqladmin.AdvancedMachineFeatures) []map[string]interface{} { + if advancedMachineFeatures == nil { + return nil + } + return []map[string]interface{}{ + { + "threads_per_core": advancedMachineFeatures.ThreadsPerCore, + }, + } +} + +func flattenSqlServerAuditConfig(sqlServerAuditConfig *sqladmin.SqlServerAuditConfig) []map[string]interface{} { + if sqlServerAuditConfig == nil { + return nil + } + return []map[string]interface{}{ + { + "bucket": sqlServerAuditConfig.Bucket, + "retention_interval": sqlServerAuditConfig.RetentionInterval, + "upload_interval": sqlServerAuditConfig.UploadInterval, + }, + } +} + +func flattenDatabaseFlags(databaseFlags []*sqladmin.DatabaseFlags) []map[string]interface{} { + flags := make([]map[string]interface{}, 0, len(databaseFlags)) + + for _, flag := range databaseFlags { + data := map[string]interface{}{ + "name": flag.Name, + "value": flag.Value, + } + + flags = append(flags, data) + } + + return flags +} + +func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration, d *schema.ResourceData) interface{} { + data := map[string]interface{}{ + "ipv4_enabled": ipConfiguration.Ipv4Enabled, + "private_network": ipConfiguration.PrivateNetwork, + "allocated_ip_range": ipConfiguration.AllocatedIpRange, + "enable_private_path_for_google_cloud_services": ipConfiguration.EnablePrivatePathForGoogleCloudServices, + "ssl_mode": ipConfiguration.SslMode, + "server_ca_mode": ipConfiguration.ServerCaMode, + } + + if ipConfiguration.AuthorizedNetworks != nil { + 
data["authorized_networks"] = flattenAuthorizedNetworks(ipConfiguration.AuthorizedNetworks) + } + + if ipConfiguration.PscConfig != nil { + data["psc_config"] = flattenPscConfigs(ipConfiguration.PscConfig) + } + + return []map[string]interface{}{data} +} + +func flattenPscConfigs(pscConfig *sqladmin.PscConfig) interface{} { + data := map[string]interface{}{ + "psc_enabled": pscConfig.PscEnabled, + "allowed_consumer_projects": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(pscConfig.AllowedConsumerProjects)), + } + + return []map[string]interface{}{data} +} + +func flattenAuthorizedNetworks(entries []*sqladmin.AclEntry) interface{} { + networks := schema.NewSet(schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), []interface{}{}) + + for _, entry := range entries { + data := map[string]interface{}{ + "expiration_time": entry.ExpirationTime, + "name": entry.Name, + "value": entry.Value, + } + + networks.Add(data) + } + + return networks +} + +func flattenLocationPreference(locationPreference *sqladmin.LocationPreference) interface{} { + data := map[string]interface{}{ + "follow_gae_application": locationPreference.FollowGaeApplication, + "zone": locationPreference.Zone, + "secondary_zone": locationPreference.SecondaryZone, + } + + return []map[string]interface{}{data} +} + +func flattenMaintenanceWindow(maintenanceWindow *sqladmin.MaintenanceWindow) interface{} { + data := map[string]interface{}{ + "day": maintenanceWindow.Day, + "hour": maintenanceWindow.Hour, + "update_track": maintenanceWindow.UpdateTrack, + } + + return []map[string]interface{}{data} +} + +func flattenReplicaConfiguration(replicaConfiguration *sqladmin.ReplicaConfiguration, d *schema.ResourceData) []map[string]interface{} { + rc := []map[string]interface{}{} + + if replicaConfiguration != nil { + data := map[string]interface{}{ + "failover_target": replicaConfiguration.FailoverTarget, + + // Don't attempt to assign anything from 
replicaConfiguration.MysqlReplicaConfiguration, + // since those fields are set on create and then not stored. See description at + // https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances. + // Instead, set them to the values they previously had so we don't set them all to zero. + "ca_certificate": d.Get("replica_configuration.0.ca_certificate"), + "client_certificate": d.Get("replica_configuration.0.client_certificate"), + "client_key": d.Get("replica_configuration.0.client_key"), + "connect_retry_interval": d.Get("replica_configuration.0.connect_retry_interval"), + "dump_file_path": d.Get("replica_configuration.0.dump_file_path"), + "master_heartbeat_period": d.Get("replica_configuration.0.master_heartbeat_period"), + "password": d.Get("replica_configuration.0.password"), + "ssl_cipher": d.Get("replica_configuration.0.ssl_cipher"), + "username": d.Get("replica_configuration.0.username"), + "verify_server_certificate": d.Get("replica_configuration.0.verify_server_certificate"), + } + rc = append(rc, data) + } + + return rc +} + +func flattenIpAddresses(ipAddresses []*sqladmin.IpMapping) []map[string]interface{} { + var ips []map[string]interface{} + + for _, ip := range ipAddresses { + data := map[string]interface{}{ + "ip_address": ip.IpAddress, + "type": ip.Type, + "time_to_retire": ip.TimeToRetire, + } + + ips = append(ips, data) + } + + return ips +} + +func flattenServerCaCerts(caCerts []*sqladmin.SslCert) []map[string]interface{} { + var certs []map[string]interface{} + + for _, caCert := range caCerts { + if caCert != nil { + data := map[string]interface{}{ + "cert": caCert.Cert, + "common_name": caCert.CommonName, + "create_time": caCert.CreateTime, + "expiration_time": caCert.ExpirationTime, + "sha1_fingerprint": caCert.Sha1Fingerprint, + } + + certs = append(certs, data) + } + } + + return certs +} + +func flattenInsightsConfig(insightsConfig *sqladmin.InsightsConfig) interface{} { + data := map[string]interface{}{ + 
"query_insights_enabled": insightsConfig.QueryInsightsEnabled, + "query_string_length": insightsConfig.QueryStringLength, + "record_application_tags": insightsConfig.RecordApplicationTags, + "record_client_address": insightsConfig.RecordClientAddress, + "query_plans_per_minute": insightsConfig.QueryPlansPerMinute, + } + + return []map[string]interface{}{data} +} + +func flattenPasswordValidationPolicy(passwordValidationPolicy *sqladmin.PasswordValidationPolicy) interface{} { + data := map[string]interface{}{ + "min_length": passwordValidationPolicy.MinLength, + "complexity": passwordValidationPolicy.Complexity, + "reuse_interval": passwordValidationPolicy.ReuseInterval, + "disallow_username_substring": passwordValidationPolicy.DisallowUsernameSubstring, + "password_change_interval": passwordValidationPolicy.PasswordChangeInterval, + "enable_password_policy": passwordValidationPolicy.EnablePasswordPolicy, + } + return []map[string]interface{}{data} +} + +func flattenEdition(v interface{}) string { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "ENTERPRISE" + } + + return v.(string) +} + +func instanceMutexKey(project, instance_name string) string { + return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) +} + +// sqlDatabaseIsMaster returns true if the provided schema.ResourceData represents a +// master SQL Instance, and false if it is a replica. +func sqlDatabaseIsMaster(d *schema.ResourceData) bool { + _, ok := d.GetOk("master_instance_name") + return !ok +} + +func sqlDatabaseInstanceServiceNetworkPrecheck(d *schema.ResourceData, config *transport_tpg.Config, userAgent, network string) error { + log.Printf("[DEBUG] checking network %q for at least one service networking connection", network) + // This call requires projects.get permissions, which may not have been granted to the Terraform actor, + // particularly in shared VPC setups. Most will! But it's not strictly required. 
+ serviceNetworkingNetworkName, err := servicenetworking.RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) + if err != nil { + var gerr *googleapi.Error + if errors.As(err, &gerr) { + log.Printf("[DEBUG] retrieved googleapi error while creating sn name for %q. precheck skipped. code %v and message: %s", network, gerr.Code, gerr.Body) + return nil + } + + return err + } + + response, err := config.NewServiceNetworkingClient(userAgent).Services.Connections.List("services/servicenetworking.googleapis.com").Network(serviceNetworkingNetworkName).Do() + if err != nil { + // It is possible that the actor creating the SQL Instance might not have permissions to call servicenetworking.services.connections.list + log.Printf("[WARNING] Failed to list Service Networking of the project. Skipped Service Networking precheck.") + return nil + } + + if len(response.Connections) < 1 { + return fmt.Errorf("Error, failed to create instance because the network doesn't have at least 1 private services connection. 
Please see https://cloud.google.com/sql/docs/mysql/private-ip#network_requirements for how to create this connection.") + } + + return nil +} + +func expandRestoreBackupContext(configured []interface{}) *sqladmin.RestoreBackupContext { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _rc := configured[0].(map[string]interface{}) + return &sqladmin.RestoreBackupContext{ + BackupRunId: int64(_rc["backup_run_id"].(int)), + InstanceId: _rc["instance_id"].(string), + Project: _rc["project"].(string), + } +} + +func sqlDatabaseInstanceRestoreFromBackup(d *schema.ResourceData, config *transport_tpg.Config, userAgent, project, instanceId string, r interface{}) error { + log.Printf("[DEBUG] Initiating SQL database instance backup restore") + restoreContext := r.([]interface{}) + + backupRequest := &sqladmin.InstancesRestoreBackupRequest{ + RestoreBackupContext: expandRestoreBackupContext(restoreContext), + } + + var op *sqladmin.Operation + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + op, operr = config.NewSqlAdminClient(userAgent).Instances.RestoreBackup(project, instanceId, backupRequest).Do() + return operr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to restore instance from backup %s: %s", instanceId, err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Restore Backup", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + return nil +} + +func caseDiffDashSuppress(_, old, new string, _ *schema.ResourceData) bool { + postReplaceNew := strings.Replace(new, "-", "_", -1) + return strings.ToUpper(postReplaceNew) == strings.ToUpper(old) +} + +func isMasterInstanceNameSet(_ context.Context, oldMasterInstanceName interface{}, newMasterInstanceName interface{}, _ interface{}) bool { + new := 
newMasterInstanceName.(string) + if new == "" { + return false + } + + return true +} + +func isReplicaPromoteRequested(_ context.Context, oldInstanceType interface{}, newInstanceType interface{}, _ interface{}) bool { + oldInstanceType = oldInstanceType.(string) + newInstanceType = newInstanceType.(string) + + if newInstanceType == "CLOUD_SQL_INSTANCE" && oldInstanceType == "READ_REPLICA_INSTANCE" { + return true + } + + return false +} + +func checkPromoteConfigurations(d *schema.ResourceData) error { + masterInstanceName := d.GetRawConfig().GetAttr("master_instance_name") + replicaConfiguration := d.GetRawConfig().GetAttr("replica_configuration").AsValueSlice() + + return validatePromoteConfigurations(masterInstanceName, replicaConfiguration) +} + +func checkPromoteConfigurationsAndUpdateDiff(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { + masterInstanceName := diff.GetRawConfig().GetAttr("master_instance_name") + replicaConfiguration := diff.GetRawConfig().GetAttr("replica_configuration").AsValueSlice() + + err := validatePromoteConfigurations(masterInstanceName, replicaConfiguration) + if err != nil { + return err + } + + err = diff.SetNew("master_instance_name", nil) + if err != nil { + return err + } + + err = diff.SetNew("replica_configuration", nil) + if err != nil { + return err + } + return nil +} + +func validatePromoteConfigurations(masterInstanceName cty.Value, replicaConfigurations []cty.Value) error { + if !masterInstanceName.IsNull() { + return fmt.Errorf("Replica promote configuration check failed. Please remove master_instance_name and try again.") + } + + if len(replicaConfigurations) != 0 { + return fmt.Errorf("Replica promote configuration check failed. 
Please remove replica_configuration and try again.") + } + return nil +} diff --git a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go index 8ad06ff90f82..d8b0d19a4763 100644 --- a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go +++ b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go @@ -47,13 +47,23 @@ func ResourceStorageBucket() *schema.Resource { Read: schema.DefaultTimeout(4 * time.Minute), }, - SchemaVersion: 1, + SchemaVersion: 3, StateUpgraders: []schema.StateUpgrader{ { Type: resourceStorageBucketV0().CoreConfigSchema().ImpliedType(), Upgrade: ResourceStorageBucketStateUpgradeV0, Version: 0, }, + { + Type: resourceStorageBucketV1().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV1, + Version: 1, + }, + { + Type: resourceStorageBucketV2().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV2, + Version: 2, + }, }, Schema: map[string]*schema.Schema{ @@ -226,12 +236,6 @@ func ResourceStorageBucket() *schema.Resource { Optional: true, Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, }, - "no_age": { - Type: schema.TypeBool, - Deprecated: "`no_age` is deprecated and will be removed in a future major release. Use `send_age_if_zero` instead.", - Optional: true, - Description: `While set true, age value will be omitted.Required to set true when age is unset in the config file.`, - }, "with_state": { Type: schema.TypeString, Computed: true, @@ -265,7 +269,6 @@ func ResourceStorageBucket() *schema.Resource { "send_age_if_zero": { Type: schema.TypeBool, Optional: true, - Default: true, Description: `While set true, age value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the age field. 
It can be used alone or together with age.`, }, "send_days_since_noncurrent_time_if_zero": { @@ -1406,14 +1409,12 @@ func flattenBucketLifecycleRuleCondition(index int, d *schema.ResourceData, cond // are already present otherwise setting them to individual default values. if v, ok := d.GetOk(fmt.Sprintf("lifecycle_rule.%d.condition",index)); ok{ state_condition := v.(*schema.Set).List()[0].(map[string]interface{}) - ruleCondition["no_age"] = state_condition["no_age"].(bool) ruleCondition["send_days_since_noncurrent_time_if_zero"] = state_condition["send_days_since_noncurrent_time_if_zero"].(bool) ruleCondition["send_days_since_custom_time_if_zero"] = state_condition["send_days_since_custom_time_if_zero"].(bool) ruleCondition["send_num_newer_versions_if_zero"] = state_condition["send_num_newer_versions_if_zero"].(bool) ruleCondition["send_age_if_zero"] = state_condition["send_age_if_zero"].(bool) } else { - ruleCondition["no_age"] = false - ruleCondition["send_age_if_zero"] = true + ruleCondition["send_age_if_zero"] = false ruleCondition["send_days_since_noncurrent_time_if_zero"] = false ruleCondition["send_days_since_custom_time_if_zero"] = false ruleCondition["send_num_newer_versions_if_zero"] = false @@ -1566,15 +1567,10 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi condition := conditions[0].(map[string]interface{}) transformed := &storage.BucketLifecycleRuleCondition{} - // Setting high precedence of no_age over age and send_age_if_zero. 
- // Only sets age value when no_age is not present or no_age is present and has false value - if v, ok := condition["no_age"]; !ok || !(v.(bool)) { - if v, ok := condition["age"]; ok { - age := int64(v.(int)) - u, ok := condition["send_age_if_zero"] - if age > 0 || (ok && u.(bool)) { - transformed.Age = &age - } + if v, ok := condition["age"]; ok { + age := int64(v.(int)) + if u, ok := condition["send_age_if_zero"]; age > 0 || (ok && u.(bool)) { + transformed.Age = &age } } @@ -1685,15 +1681,8 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) - if v, ok := m["no_age"]; ok && v.(bool){ - buf.WriteString(fmt.Sprintf("%t-", v.(bool))) - } else { - if v, ok := m["send_age_if_zero"]; ok { - buf.WriteString(fmt.Sprintf("%t-", v.(bool))) - } - if v, ok := m["age"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } + if v, ok := m["age"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) } if v, ok := m["days_since_custom_time"]; ok { @@ -1737,6 +1726,10 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%d-", v.(int))) } + if v, ok := m["send_age_if_zero"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + if v, ok := m["send_days_since_noncurrent_time_if_zero"]; ok { buf.WriteString(fmt.Sprintf("%t-", v.(bool))) } @@ -1859,8 +1852,7 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("autoclass", flattenBucketAutoclass(res.Autoclass)); err != nil { return fmt.Errorf("Error setting autoclass: %s", err) } - // lifecycle_rule contains terraform only variable no_age. - // Passing config("d") to flattener function to set no_age separately. + // Passing config("d") to flattener function to set virtual fields separately. 
if err := d.Set("lifecycle_rule", flattenBucketLifecycle(d, res.Lifecycle)); err != nil { return fmt.Errorf("Error setting lifecycle_rule: %s", err) } diff --git a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go index 83f48c3aeb56..19f6a9feae4d 100644 --- a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go +++ b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go @@ -133,7 +133,7 @@ func TestAccStorageBucket_AutoclassDiffSupress(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_basicWithAutoclass(bucketName, false), + Config: testAccStorageBucket_basicWithAutoclass(bucketName,false), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -146,7 +146,7 @@ func TestAccStorageBucket_AutoclassDiffSupress(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_basicWithAutoclass(bucketName, true), + Config: testAccStorageBucket_basicWithAutoclass(bucketName,true), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -346,7 +346,7 @@ func TestAccStorageBucket_dualLocation_rpo(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_dualLocation_rpo(bucketName, "ASYNC_TURBO"), + Config: testAccStorageBucket_dualLocation_rpo(bucketName,"ASYNC_TURBO"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "rpo", "ASYNC_TURBO"), @@ -359,7 +359,7 @@ func TestAccStorageBucket_dualLocation_rpo(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_dualLocation_rpo(bucketName, "DEFAULT"), + Config: 
testAccStorageBucket_dualLocation_rpo(bucketName,"DEFAULT"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "rpo", "DEFAULT"), @@ -399,7 +399,7 @@ func TestAccStorageBucket_multiLocation_rpo(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_multiLocation_rpo(bucketName, "DEFAULT"), + Config: testAccStorageBucket_multiLocation_rpo(bucketName,"DEFAULT"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "rpo", "DEFAULT"), @@ -655,14 +655,13 @@ func TestAccStorageBucket_lifecycleRulesVirtualFields(t *testing.T) { ResourceName: "google_storage_bucket.bucket", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.no_age", "lifecycle_rule.1.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.1.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.1.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.2.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero"}, + ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.1.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.2.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.1.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.2.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero", "lifecycle_rule.2.condition.0.send_age_if_zero"}, }, { Config: testAccStorageBucket_customAttributes_withLifecycleVirtualFieldsUpdate2(bucketName), Check: 
resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), - testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket, 1), testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket, 2), ), }, @@ -670,7 +669,7 @@ func TestAccStorageBucket_lifecycleRulesVirtualFields(t *testing.T) { ResourceName: "google_storage_bucket.bucket", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.1.condition.0.no_age", "lifecycle_rule.0.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.0.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.0.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.0.condition.0.send_age_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero", "lifecycle_rule.2.condition.0.send_age_if_zero"}, + ImportStateVerifyIgnore: []string{"force_destroy", "lifecycle_rule.0.condition.0.send_days_since_noncurrent_time_if_zero", "lifecycle_rule.0.condition.0.send_days_since_custom_time_if_zero", "lifecycle_rule.0.condition.0.send_num_newer_versions_if_zero", "lifecycle_rule.0.condition.0.send_age_if_zero", "lifecycle_rule.1.condition.0.send_age_if_zero", "lifecycle_rule.2.condition.0.send_age_if_zero"}, }, { Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), @@ -1513,7 +1512,7 @@ func TestAccStorageBucket_SoftDeletePolicy(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_SoftDeletePolicy(bucketName, 7776000), + Config: testAccStorageBucket_SoftDeletePolicy(bucketName,7776000), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -1528,7 +1527,7 @@ func TestAccStorageBucket_SoftDeletePolicy(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccStorageBucket_SoftDeletePolicy(bucketName, 0), + Config: 
testAccStorageBucket_SoftDeletePolicy(bucketName,0), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( t, "google_storage_bucket.bucket", bucketName, &bucket), @@ -1804,7 +1803,7 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testAccStorageBucket_dualLocation_rpo(bucketName string, rpo string) string { +func testAccStorageBucket_dualLocation_rpo(bucketName string,rpo string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" @@ -1815,10 +1814,10 @@ resource "google_storage_bucket" "bucket" { } rpo = "%s" } -`, bucketName, rpo) +`, bucketName,rpo) } -func testAccStorageBucket_multiLocation_rpo(bucketName string, rpo string) string { +func testAccStorageBucket_multiLocation_rpo(bucketName string,rpo string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" @@ -1826,7 +1825,7 @@ resource "google_storage_bucket" "bucket" { force_destroy = true rpo = "%s" } -`, bucketName, rpo) +`, bucketName,rpo) } func testAccStorageBucket_customAttributes(bucketName string) string { @@ -1868,6 +1867,7 @@ resource "google_storage_bucket" "bucket" { type = "Delete" } condition { + send_age_if_zero = true age = 0 } } @@ -1915,7 +1915,6 @@ resource "google_storage_bucket" "bucket" { } condition { age = 10 - no_age = false days_since_noncurrent_time = 0 send_days_since_noncurrent_time_if_zero = false days_since_custom_time = 0 @@ -1929,7 +1928,6 @@ resource "google_storage_bucket" "bucket" { type = "Delete" } condition { - no_age = true days_since_noncurrent_time = 0 send_days_since_noncurrent_time_if_zero = true days_since_custom_time = 0 @@ -1943,6 +1941,7 @@ resource "google_storage_bucket" "bucket" { type = "Delete" } condition { + send_age_if_zero = true send_days_since_noncurrent_time_if_zero = true send_days_since_custom_time_if_zero = true send_num_newer_versions_if_zero = true @@ -1964,7 +1963,6 @@ resource "google_storage_bucket" "bucket" { } condition { age = 10 - 
no_age = false days_since_noncurrent_time = 0 send_days_since_noncurrent_time_if_zero = true days_since_custom_time = 0 @@ -1979,7 +1977,6 @@ resource "google_storage_bucket" "bucket" { } condition { age = 10 - no_age = true send_age_if_zero = false custom_time_before = "2022-09-01" days_since_noncurrent_time = 0 @@ -2549,7 +2546,7 @@ resource "google_storage_bucket" "bucket" { } func testAccStorageBucket_SoftDeletePolicy(bucketName string, duration int) string { - return fmt.Sprintf(` + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" location = "US" From b7381a61effb1dc12e220b0cf191df2dc96ae93a Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Tue, 3 Sep 2024 14:23:26 -0700 Subject: [PATCH 21/60] Add go yaml validations (part 1) (#11621) --- mmv1/api/async.go | 71 +++++------------------- mmv1/api/product.go | 51 +++++++++-------- mmv1/api/product/version.go | 16 ++++-- mmv1/api/resource.go | 82 ++++++++++++++++++++++++---- mmv1/api/resource/custom_code.go | 21 ------- mmv1/api/resource/docs.go | 9 --- mmv1/api/resource/examples.go | 37 +++++++++++-- mmv1/api/resource/iam_policy.go | 24 ++++++-- mmv1/api/resource/nested_query.go | 14 ++--- mmv1/api/resource/reference_links.go | 10 ---- mmv1/api/resource/sweeper.go | 6 -- mmv1/api/resource/validation.go | 7 --- mmv1/api/timeouts.go | 8 --- mmv1/api/type.go | 22 ++++++++ mmv1/go.mod | 2 +- mmv1/go.sum | 4 +- mmv1/google/yaml_validator.go | 4 +- 17 files changed, 204 insertions(+), 184 deletions(-) diff --git a/mmv1/api/async.go b/mmv1/api/async.go index dd591c87d111..0905853e80a1 100644 --- a/mmv1/api/async.go +++ b/mmv1/api/async.go @@ -14,11 +14,11 @@ package api import ( + "log" "strings" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "golang.org/x/exp/slices" - "gopkg.in/yaml.v3" ) // Base class from which other Async classes can inherit. 
@@ -40,13 +40,6 @@ type Async struct { PollAsync `yaml:",inline"` } -// def validate -// super - -// check :operation, type: Operation -// check :actions, default: %w[create delete update], type: ::Array, item_type: ::String -// end - // def allow?(method) func (a Async) Allow(method string) bool { return slices.Contains(a.Actions, strings.ToLower(method)) @@ -80,11 +73,6 @@ func NewAsync() *Async { return oa } -// def validate -// super -// check :resource_inside_response, type: :boolean, default: false -// end - // Represents an asynchronous operation definition type OpAsync struct { Result OpAsyncResult @@ -106,17 +94,6 @@ type OpAsync struct { // @error = error // end -// def validate -// super - -// check :operation, type: Operation, required: true -// check :result, type: Result, default: Result.new -// check :status, type: Status -// check :error, type: Error -// check :actions, default: %w[create delete update], type: ::Array, item_type: ::String -// check :include_project, type: :boolean, default: false -// end - type OpAsyncOperation struct { Kind string @@ -156,12 +133,6 @@ type OpAsyncResult struct { // @resource_inside_response = resource_inside_response // end -// def validate -// super - -// check :path, type: String -// end - // Provides information to parse the result response to check operation // status type OpAsyncStatus struct { @@ -181,12 +152,6 @@ type OpAsyncStatus struct { // @allowed = allowed // end -// def validate -// super -// check :path, type: String -// check :allowed, type: Array, item_type: [::String, :boolean] -// end - // Provides information on how to retrieve errors of the executed operations type OpAsyncError struct { google.YamlValidator @@ -202,12 +167,6 @@ type OpAsyncError struct { // @message = message // end -// def validate -// super -// check :path, type: String -// check :message, type: String -// end - // Async implementation for polling in Terraform type PollAsync struct { // Details how to poll for an 
eventually-consistent resource state. @@ -233,12 +192,12 @@ type PollAsync struct { TargetOccurrences int `yaml:"target_occurrences"` } -func (a *Async) UnmarshalYAML(n *yaml.Node) error { +func (a *Async) UnmarshalYAML(unmarshal func(any) error) error { a.Actions = []string{"create", "delete", "update"} type asyncAlias Async aliasObj := (*asyncAlias)(a) - err := n.Decode(&aliasObj) + err := unmarshal(aliasObj) if err != nil { return err } @@ -250,16 +209,14 @@ func (a *Async) UnmarshalYAML(n *yaml.Node) error { return nil } -// return nil -// } - -// def validate -// super - -// check :check_response_func_existence, type: String, required: true -// check :check_response_func_absence, type: String, -// default: 'transport_tpg.PollCheckForAbsence' -// check :custom_poll_read, type: String -// check :suppress_error, type: :boolean, default: false -// check :target_occurrences, type: Integer, default: 1 -// end +func (a *Async) Validate() { + if a.Type == "OpAsync" { + if a.Operation == nil { + log.Fatalf("Missing `Operation` for OpAsync") + } else { + if a.Operation.BaseUrl != "" && a.Operation.FullUrl != "" { + log.Fatalf("`base_url` and `full_url` cannot be set at the same time in OpAsync operation.") + } + } + } +} diff --git a/mmv1/api/product.go b/mmv1/api/product.go index d02a5ac424d2..f157c057d802 100644 --- a/mmv1/api/product.go +++ b/mmv1/api/product.go @@ -16,8 +16,7 @@ package api import ( "log" "strings" - - "gopkg.in/yaml.v3" + "unicode" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/product" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" @@ -68,12 +67,11 @@ type Product struct { ClientName string `yaml:"client_name"` } -func (p *Product) UnmarshalYAML(n *yaml.Node) error { +func (p *Product) UnmarshalYAML(unmarshal func(any) error) error { type productAlias Product aliasObj := (*productAlias)(p) - err := n.Decode(&aliasObj) - if err != nil { + if err := unmarshal(aliasObj); err != nil { return err } @@ -84,31 +82,32 @@ func (p 
*Product) UnmarshalYAML(n *yaml.Node) error { } func (p *Product) Validate() { - // TODO Q2 Rewrite super - // super -} - -// def validate -// super -// set_variables @objects, :__product + // product names must start with a capital + for i, ch := range p.Name { + if !unicode.IsUpper(ch) { + log.Fatalf("product name `%s` must start with a capital letter.", p.Name) + } + if i == 0 { + break + } + } -// // name comes from Named, and product names must start with a capital -// caps = ('A'..'Z').to_a -// unless caps.include? @name[0] -// raise "product name `//{@name}` must start with a capital letter." -// end + if len(p.Scopes) == 0 { + log.Fatalf("Missing `scopes` for product %s", p.Name) + } -// check :display_name, type: String -// check :objects, type: Array, item_type: Api::Resource -// check :scopes, type: Array, item_type: String, required: true -// check :operation_retry, type: String + if p.Versions == nil { + log.Fatalf("Missing `versions` for product %s", p.Name) + } -// check :async, type: Api::Async -// check :legacy_name, type: String -// check :client_name, type: String + for _, v := range p.Versions { + v.Validate(p.Name) + } -// check :versions, type: Array, item_type: Api::Product::Version, required: true -// end + if p.Async != nil { + p.Async.Validate() + } +} // ==================== // Custom Setters diff --git a/mmv1/api/product/version.go b/mmv1/api/product/version.go index aa5bdd335c10..c6f3b14f884e 100644 --- a/mmv1/api/product/version.go +++ b/mmv1/api/product/version.go @@ -14,6 +14,8 @@ package product import ( + "log" + "golang.org/x/exp/slices" ) @@ -40,12 +42,14 @@ type Version struct { Name string } -// def validate -// super -// check :cai_base_url, type: String, required: false -// check :base_url, type: String, required: true -// check :name, type: String, allowed: ORDER, required: true -// end +func (v *Version) Validate(pName string) { + if v.Name == "" { + log.Fatalf("Missing `name` in `version` for product %s", pName) + } + if 
v.BaseUrl == "" { + log.Fatalf("Missing `base_url` in `version` for product %s", pName) + } +} // def to_s // "//{name}: //{base_url}" diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index f45908b8da70..cb1ab7daa791 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -14,6 +14,7 @@ package api import ( "fmt" + "log" "maps" "regexp" "sort" @@ -23,7 +24,6 @@ import ( "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/resource" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "golang.org/x/exp/slices" - "gopkg.in/yaml.v3" ) type Resource struct { @@ -311,7 +311,7 @@ type Resource struct { ImportPath string } -func (r *Resource) UnmarshalYAML(n *yaml.Node) error { +func (r *Resource) UnmarshalYAML(unmarshal func(any) error) error { r.CreateVerb = "POST" r.ReadVerb = "GET" r.DeleteVerb = "DELETE" @@ -320,7 +320,7 @@ func (r *Resource) UnmarshalYAML(n *yaml.Node) error { type resourceAlias Resource aliasObj := (*resourceAlias)(r) - err := n.Decode(&aliasObj) + err := unmarshal(aliasObj) if err != nil { return err } @@ -331,6 +331,9 @@ func (r *Resource) UnmarshalYAML(n *yaml.Node) error { if r.CollectionUrlKey == "" { r.CollectionUrlKey = google.Camelize(google.Plural(r.Name), "lower") } + if r.IdFormat == "" { + r.IdFormat = r.SelfLinkUri() + } if len(r.VirtualFields) > 0 { for _, f := range r.VirtualFields { @@ -341,19 +344,76 @@ func (r *Resource) UnmarshalYAML(n *yaml.Node) error { return nil } -// TODO: rewrite functions -func (r *Resource) Validate() { - // TODO Q1 Rewrite super - // super -} - func (r *Resource) SetDefault(product *Product) { r.ProductMetadata = product for _, property := range r.AllProperties() { property.SetDefault(r) } - if r.IdFormat == "" { - r.IdFormat = r.SelfLinkUri() +} + +func (r *Resource) Validate() { + if r.NestedQuery != nil && r.NestedQuery.IsListOfIds && len(r.Identity) != 1 { + log.Fatalf("`is_list_of_ids: true` implies resource has exactly one `identity` property") + } + + // Ensures we have all 
properties defined + for _, i := range r.Identity { + hasIdentify := slices.ContainsFunc(r.AllUserProperties(), func(p *Type) bool { + return p.Name == i + }) + if !hasIdentify { + log.Fatalf("Missing property/parameter for identity %s", i) + } + } + + if r.Description == "" { + log.Fatalf("Missing `description` for resource %s", r.Name) + } + + if !r.Exclude { + if len(r.Properties) == 0 { + log.Fatalf("Missing `properties` for resource %s", r.Name) + } + } + + allowed := []string{"POST", "PUT", "PATCH"} + if !slices.Contains(allowed, r.CreateVerb) { + log.Fatalf("Value on `create_verb` should be one of %#v", allowed) + } + + allowed = []string{"GET", "POST"} + if !slices.Contains(allowed, r.ReadVerb) { + log.Fatalf("Value on `read_verb` should be one of %#v", allowed) + } + + allowed = []string{"POST", "PUT", "PATCH", "DELETE"} + if !slices.Contains(allowed, r.DeleteVerb) { + log.Fatalf("Value on `delete_verb` should be one of %#v", allowed) + } + + allowed = []string{"POST", "PUT", "PATCH"} + if !slices.Contains(allowed, r.UpdateVerb) { + log.Fatalf("Value on `update_verb` should be one of %#v", allowed) + } + + for _, property := range r.AllProperties() { + property.Validate(r.Name) + } + + if r.IamPolicy != nil { + r.IamPolicy.Validate(r.Name) + } + + if r.NestedQuery != nil { + r.NestedQuery.Validate(r.Name) + } + + for _, example := range r.Examples { + example.Validate(r.Name) + } + + if r.Async != nil { + r.Async.Validate() } } diff --git a/mmv1/api/resource/custom_code.go b/mmv1/api/resource/custom_code.go index 48a69aa40e26..a00d09ce5467 100644 --- a/mmv1/api/resource/custom_code.go +++ b/mmv1/api/resource/custom_code.go @@ -136,24 +136,3 @@ type CustomCode struct { // with a success HTTP code for deleted resources TestCheckDestroy string `yaml:"test_check_destroy"` } - -// def validate -// super - -// check :extra_schema_entry, type: String -// check :encoder, type: String -// check :update_encoder, type: String -// check :decoder, type: String -// 
check :constants, type: String -// check :pre_create, type: String -// check :post_create, type: String -// check :custom_create, type: String -// check :pre_read, type: String -// check :pre_update, type: String -// check :post_update, type: String -// check :custom_update, type: String -// check :pre_delete, type: String -// check :custom_import, type: String -// check :post_import, type: String -// check :test_check_destroy, type: String -// end diff --git a/mmv1/api/resource/docs.go b/mmv1/api/resource/docs.go index 2b81541c09c3..2de8004d2350 100644 --- a/mmv1/api/resource/docs.go +++ b/mmv1/api/resource/docs.go @@ -41,12 +41,3 @@ type Docs struct { // attr_reader : Attributes string } - -// def validate -// super -// check :warning, type: String -// check :note, type: String -// check :required_properties, type: String -// check :optional_properties, type: String -// check :attributes, type: String -// end diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index bc24fede5793..8d618b7b94fb 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -16,15 +16,16 @@ package resource import ( "bytes" "fmt" + "log" "net/url" "path/filepath" "regexp" + "slices" "strings" "text/template" "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" "github.com/golang/glog" - "gopkg.in/yaml.v3" ) // Generates configs to be shown as examples in docs and outputted as tests @@ -163,11 +164,11 @@ type Examples struct { } // Set default value for fields -func (e *Examples) UnmarshalYAML(n *yaml.Node) error { +func (e *Examples) UnmarshalYAML(unmarshal func(any) error) error { type exampleAlias Examples aliasObj := (*exampleAlias)(e) - err := n.Decode(&aliasObj) + err := unmarshal(aliasObj) if err != nil { return err } @@ -180,6 +181,33 @@ func (e *Examples) UnmarshalYAML(n *yaml.Node) error { return nil } +func (e *Examples) Validate(rName string) { + if e.Name == "" { + log.Fatalf("Missing `name` for one example in resource %s", 
rName) + } + e.ValidateExternalProviders() +} + +func (e *Examples) ValidateExternalProviders() { + // Official providers supported by HashiCorp + // https://registry.terraform.io/search/providers?namespace=hashicorp&tier=official + HASHICORP_PROVIDERS := []string{"aws", "random", "null", "template", "azurerm", "kubernetes", "local", + "external", "time", "vault", "archive", "tls", "helm", "azuread", "http", "cloudinit", "tfe", "dns", + "consul", "vsphere", "nomad", "awscc", "googleworkspace", "hcp", "boundary", "ad", "azurestack", "opc", + "oraclepaas", "hcs", "salesforce"} + + var unallowedProviders []string + for _, p := range e.ExternalProviders { + if !slices.Contains(HASHICORP_PROVIDERS, p) { + unallowedProviders = append(unallowedProviders, p) + } + } + + if len(unallowedProviders) > 0 { + log.Fatalf("Providers %#v are not allowed. Only providers published by HashiCorp are allowed.", unallowedProviders) + } +} + // Executes example templates for documentation and tests func (e *Examples) SetHCLText() { originalVars := e.Vars @@ -357,9 +385,6 @@ func SubstituteTestPaths(config string) string { // check :skip_vcr, type: TrueClass // } -// TODO -// validate_external_providers - // func (e *Examples) merge(other) { // result = self.class.new // instance_variables.each do |v| diff --git a/mmv1/api/resource/iam_policy.go b/mmv1/api/resource/iam_policy.go index 812e18ab8170..18708e48fd57 100644 --- a/mmv1/api/resource/iam_policy.go +++ b/mmv1/api/resource/iam_policy.go @@ -14,7 +14,8 @@ package resource import ( - "gopkg.in/yaml.v3" + "log" + "slices" ) // Information about the IAM policy for this resource @@ -117,7 +118,7 @@ type IamPolicy struct { SubstituteZoneValue bool `yaml:"substitute_zone_value"` } -func (p *IamPolicy) UnmarshalYAML(n *yaml.Node) error { +func (p *IamPolicy) UnmarshalYAML(unmarshal func(any) error) error { p.MethodNameSeparator = "/" p.FetchIamPolicyVerb = "GET" p.FetchIamPolicyMethod = "getIamPolicy" @@ -132,7 +133,7 @@ func (p *IamPolicy) 
UnmarshalYAML(n *yaml.Node) error { type iamPolicyAlias IamPolicy aliasObj := (*iamPolicyAlias)(p) - err := n.Decode(&aliasObj) + err := unmarshal(aliasObj) if err != nil { return err } @@ -140,6 +141,19 @@ func (p *IamPolicy) UnmarshalYAML(n *yaml.Node) error { return nil } -// func (p *IamPolicy) validate() { +func (p *IamPolicy) Validate(rName string) { + allowed := []string{"GET", "POST"} + if !slices.Contains(allowed, p.FetchIamPolicyVerb) { + log.Fatalf("Value on `fetch_iam_policy_verb` should be one of %#v in resource %s", allowed, rName) + } + + allowed = []string{"POST", "PUT"} + if !slices.Contains(allowed, p.SetIamPolicyVerb) { + log.Fatalf("Value on `set_iam_policy_verb` should be one of %#v in resource %s", allowed, rName) + } -// } + allowed = []string{"REQUEST_BODY", "QUERY_PARAM", "QUERY_PARAM_NESTED"} + if p.IamConditionsRequestType != "" && !slices.Contains(allowed, p.IamConditionsRequestType) { + log.Fatalf("Value on `iam_conditions_request_type` should be one of %#v in resource %s", allowed, rName) + } +} diff --git a/mmv1/api/resource/nested_query.go b/mmv1/api/resource/nested_query.go index 174f46f8d367..a0ebea0198e1 100644 --- a/mmv1/api/resource/nested_query.go +++ b/mmv1/api/resource/nested_query.go @@ -13,6 +13,8 @@ package resource +import "log" + // Metadata for resources that are nested within a parent resource, as // a list of resources or single object within the parent. // e.g. 
Fine-grained resources @@ -43,10 +45,8 @@ type NestedQuery struct { ModifyByPatch bool `yaml:"modify_by_patch"` } -// def validate -// super - -// check :keys, type: Array, item_type: String, required: true -// check :is_list_of_ids, type: :boolean, default: false -// check :modify_by_patch, type: :boolean, default: false -// end +func (q *NestedQuery) Validate(rName string) { + if len(q.Keys) == 0 { + log.Fatalf("Missing `keys` for `nested_query` in resource %s", rName) + } +} diff --git a/mmv1/api/resource/reference_links.go b/mmv1/api/resource/reference_links.go index 6237308ffb4d..5c4862bbaae8 100644 --- a/mmv1/api/resource/reference_links.go +++ b/mmv1/api/resource/reference_links.go @@ -13,14 +13,8 @@ package resource -import ( - "github.com/GoogleCloudPlatform/magic-modules/mmv1/google" -) - // Represents a list of documentation links. type ReferenceLinks struct { - google.YamlValidator - // guides containing // name: The title of the link // value: The URL to navigate on click @@ -33,7 +27,3 @@ type ReferenceLinks struct { //attr_reader Api string } - -// func (l *ReferenceLinks) validate() { - -// } diff --git a/mmv1/api/resource/sweeper.go b/mmv1/api/resource/sweeper.go index 7ba3e789e203..ebc078c5a770 100644 --- a/mmv1/api/resource/sweeper.go +++ b/mmv1/api/resource/sweeper.go @@ -19,9 +19,3 @@ type Sweeper struct { // eligibility for deletion for generated resources SweepableIdentifierField string `yaml:"sweepable_identifier_field"` } - -// def validate -// super - -// check :sweepable_identifier_field, type: String -// end diff --git a/mmv1/api/resource/validation.go b/mmv1/api/resource/validation.go index a49ba30a7ddb..24cbfb557758 100644 --- a/mmv1/api/resource/validation.go +++ b/mmv1/api/resource/validation.go @@ -20,10 +20,3 @@ type Validation struct { Regex string Function string } - -// def validate -// super - -// check :regex, type: String -// check :function, type: String -// end diff --git a/mmv1/api/timeouts.go b/mmv1/api/timeouts.go index 
5e8c77ac76e8..93380777e660 100644 --- a/mmv1/api/timeouts.go +++ b/mmv1/api/timeouts.go @@ -40,11 +40,3 @@ func NewTimeouts() *Timeouts { DeleteMinutes: DEFAULT_DELETE_TIMEOUT_MINUTES, } } - -// def validate -// super - -// check :insert_minutes, type: Integer, default: DEFAULT_INSERT_TIMEOUT_MINUTES -// check :update_minutes, type: Integer, default: DEFAULT_UPDATE_TIMEOUT_MINUTES -// check :delete_minutes, type: Integer, default: DEFAULT_DELETE_TIMEOUT_MINUTES -// end diff --git a/mmv1/api/type.go b/mmv1/api/type.go index b83028e81b5f..df6cbfb18bf7 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -337,6 +337,28 @@ func (t *Type) SetDefault(r *Resource) { } } +func (t *Type) Validate(rName string) { + if t.Output && t.Required { + log.Fatalf("Property %s cannot be output and required at the same time in resource %s.", t.Name, rName) + } + + if t.DefaultFromApi && t.DefaultValue != nil { + log.Fatalf("'default_value' and 'default_from_api' cannot be both set in resource %s", rName) + } + + switch { + case t.IsA("Array"): + t.ItemType.Validate(rName) + case t.IsA("Map"): + t.ValueType.Validate(rName) + case t.IsA("NestedObject"): + for _, p := range t.Properties { + p.Validate(rName) + } + default: + } +} + // super // check :description, type: ::String, required: true // check :exclude, type: :boolean, default: false, required: true diff --git a/mmv1/go.mod b/mmv1/go.mod index 3312c96d0e5b..6ba2aa672c36 100644 --- a/mmv1/go.mod +++ b/mmv1/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - gopkg.in/yaml.v3 v3.0.1 + gopkg.in/yaml.v2 v2.4.0 ) require github.com/golang/glog v1.2.0 diff --git a/mmv1/go.sum b/mmv1/go.sum index 9e56cc0f1cb1..02e4ed2c5647 100644 --- a/mmv1/go.sum +++ b/mmv1/go.sum @@ -6,5 +6,5 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUF golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/mmv1/google/yaml_validator.go b/mmv1/google/yaml_validator.go index 485db94d1dd0..811246cea98e 100644 --- a/mmv1/google/yaml_validator.go +++ b/mmv1/google/yaml_validator.go @@ -16,7 +16,7 @@ package google import ( "log" - "gopkg.in/yaml.v3" + "gopkg.in/yaml.v2" ) // A helper class to validate contents coming from YAML files. @@ -26,7 +26,7 @@ func (v *YamlValidator) Parse(content []byte, obj interface{}, yamlPath string) // TODO(nelsonjr): Allow specifying which symbols to restrict it further. // But it requires inspecting all configuration files for symbol sources, // such as Enum values. Leaving it as a nice-to-have for the future. 
- if err := yaml.Unmarshal(content, obj); err != nil { + if err := yaml.UnmarshalStrict(content, obj); err != nil { log.Fatalf("Cannot unmarshal data from file %s: %v", yamlPath, err) } } From bd724bdbb0031af8998ae982867ae37727e2bd94 Mon Sep 17 00:00:00 2001 From: Ian Milligan Date: Tue, 3 Sep 2024 14:24:47 -0700 Subject: [PATCH 22/60] Add service mesh field to Cloud Run v2 Service (#11502) Co-authored-by: Zhenhua Li --- mmv1/products/cloudrunv2/Service.yaml | 22 ++++ .../examples/cloudrunv2_service_mesh.tf.erb | 29 +++++ .../resource_cloud_run_v2_service_test.go.erb | 121 ++++++++++++++++++ 3 files changed, 172 insertions(+) create mode 100644 mmv1/templates/terraform/examples/cloudrunv2_service_mesh.tf.erb diff --git a/mmv1/products/cloudrunv2/Service.yaml b/mmv1/products/cloudrunv2/Service.yaml index 03234194916a..8cb2b9e10cb5 100644 --- a/mmv1/products/cloudrunv2/Service.yaml +++ b/mmv1/products/cloudrunv2/Service.yaml @@ -160,6 +160,18 @@ examples: ignore_read_extra: - 'deletion_protection' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudrunv2_service_mesh' + min_version: 'beta' + external_providers: ['time'] + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-cloudrun-service-%s\", context[\"random_suffix\"])" + vars: + cloud_run_service_name: 'cloudrun-service' + mesh_name: 'network-services-mesh' + ignore_read_extra: + - 'deletion_protection' + parameters: - !ruby/object:Api::Type::String name: 'location' @@ -877,6 +889,16 @@ properties: name: 'sessionAffinity' description: |- Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity + - !ruby/object:Api::Type::NestedObject + name: 'serviceMesh' + min_version: beta + description: |- + Enables Cloud Service Mesh for this Revision. + properties: + - !ruby/object:Api::Type::String + name: 'mesh' + description: |- + The Mesh resource name. 
For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. - !ruby/object:Api::Type::Array name: 'traffic' description: |- diff --git a/mmv1/templates/terraform/examples/cloudrunv2_service_mesh.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_service_mesh.tf.erb new file mode 100644 index 000000000000..58fe085c2284 --- /dev/null +++ b/mmv1/templates/terraform/examples/cloudrunv2_service_mesh.tf.erb @@ -0,0 +1,29 @@ +resource "google_cloud_run_v2_service" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['cloud_run_service_name'] %>" + depends_on = [time_sleep.wait_for_mesh] + deletion_protection = false + + location = "us-central1" + launch_stage = "BETA" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + service_mesh { + mesh = google_network_services_mesh.mesh.id + } + } +} + +resource "time_sleep" "wait_for_mesh" { + depends_on = [google_network_services_mesh.mesh] + + create_duration = "1m" +} + +resource "google_network_services_mesh" "mesh" { + provider = google-beta + name = "<%= ctx[:vars]['mesh_name'] %>" +} diff --git a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb index 9d2a77a252f0..55ebef510c74 100644 --- a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb @@ -1076,3 +1076,124 @@ resource "google_cloud_run_v2_service" "default" { `, context) } <% end -%> + +<% unless version == 'ga' -%> +func TestAccCloudRunV2Service_cloudrunv2ServiceMeshUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceMesh(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels", "launch_stage", "deletion_protection"}, + }, + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceMeshUpdate(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels", "launch_stage", "deletion_protection"}, + }, + }, + }) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceMesh(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + provider = google-beta + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + deletion_protection = false + depends_on = [time_sleep.wait_for_mesh] + launch_stage = "BETA" + template { + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + service_mesh { + mesh = google_network_services_mesh.mesh.id + } + } +} + +resource "time_sleep" "wait_for_mesh" { + depends_on = [ + google_network_services_mesh.mesh, + google_network_services_mesh.new_mesh, + ] + + create_duration = "1m" +} + +resource "google_network_services_mesh" "mesh" { + provider = google-beta + name = "tf-test-mesh%{random_suffix}" +} + +resource "google_network_services_mesh" "new_mesh" { + provider = google-beta + name = "tf-test-new-mesh%{random_suffix}" +} +`, context) +} + +func 
testAccCloudRunV2Service_cloudrunv2ServiceMeshUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + provider = google-beta + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + deletion_protection = false + depends_on = [time_sleep.wait_for_mesh] + launch_stage = "BETA" + template { + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + service_mesh { + mesh = google_network_services_mesh.new_mesh.id + } + } +} + +resource "time_sleep" "wait_for_mesh" { + depends_on = [ + google_network_services_mesh.mesh, + google_network_services_mesh.new_mesh, + ] + + create_duration = "1m" +} + +resource "google_network_services_mesh" "mesh" { + provider = google-beta + name = "tf-test-mesh%{random_suffix}" +} + +resource "google_network_services_mesh" "new_mesh" { + provider = google-beta + name = "tf-test-new-mesh%{random_suffix}" +} +`, context) +} +<% end -%> From 1209495e43fe5c24eef8c0b5d78b92cd505dcb19 Mon Sep 17 00:00:00 2001 From: hao-nan-li <100219545+hao-nan-li@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:19:19 -0700 Subject: [PATCH 23/60] Upgrade to dcl v1.72.0 (#11580) --- mmv1/third_party/terraform/go.mod.erb | 2 +- mmv1/third_party/terraform/go.sum | 6 ++-- .../samples/basic.workload.json | 2 +- .../split_billing_partner.workload.json | 30 +++++++++++++++++++ .../split_billing_partner_workload.yaml | 28 +++++++++++++++++ tpgtools/go.mod | 2 +- tpgtools/go.sum | 4 +-- .../samples/workload/meta.yaml | 1 + 8 files changed, 67 insertions(+), 8 deletions(-) create mode 100644 tpgtools/api/assuredworkloads/samples/split_billing_partner.workload.json create mode 100644 tpgtools/api/assuredworkloads/samples/split_billing_partner_workload.yaml diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index 829e52042f60..679ce956db06 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ 
b/mmv1/third_party/terraform/go.mod.erb @@ -5,7 +5,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.30.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index 495dbf5b2e04..8213bad59ed5 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -22,8 +22,8 @@ cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 h1:vRKCLiR3faPmXAoqSdwXLv28/kygggzaKXzgdm6GXhg= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 h1:VodSRLhOrb8hhRbPre275EreP4vTiaejdBcvd2MCtX4= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= @@ -439,4 +439,4 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= \ No newline at end of file +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/tpgtools/api/assuredworkloads/samples/basic.workload.json b/tpgtools/api/assuredworkloads/samples/basic.workload.json index 5b280bd44047..c3d1aa445c1e 100755 --- a/tpgtools/api/assuredworkloads/samples/basic.workload.json +++ b/tpgtools/api/assuredworkloads/samples/basic.workload.json @@ -16,7 +16,7 @@ "resourceSettings": [ { "resourceType": "CONSUMER_FOLDER", - "displayName": "folder-display-name" + "displayName": "{{name}}" }, { "resourceType": "ENCRYPTION_KEYS_PROJECT" diff --git a/tpgtools/api/assuredworkloads/samples/split_billing_partner.workload.json b/tpgtools/api/assuredworkloads/samples/split_billing_partner.workload.json new file mode 100644 index 000000000000..75652da283f5 --- /dev/null +++ b/tpgtools/api/assuredworkloads/samples/split_billing_partner.workload.json @@ -0,0 +1,30 @@ +{ + "organization": "{{org_id}}", + "location": "europe-west8", + "displayName": "{{display}}", + "complianceRegime": "ASSURED_WORKLOADS_FOR_PARTNERS", + "billingAccount": "billingAccounts/{{billing_account}}", + "partnerServicesBillingAccount": "billingAccounts/01BF3F-2C6DE5-30C607", + "labels": { + "label-one": "value-one" + }, + "partner": "SOVEREIGN_CONTROLS_BY_PSN", + "partnerPermissions": { + "dataLogsViewer": true, + "serviceAccessApprover": true, + "assuredWorkloadsMonitoring": true + }, + "violationNotificationsEnabled": true, + "resourceSettings": [ + { + "resourceType": "CONSUMER_FOLDER" + }, + { + "resourceType": "ENCRYPTION_KEYS_PROJECT" + }, + { + "resourceId": "{{ring}}", + "resourceType": "KEYRING" + } + ] + } \ No newline at end of file diff --git a/tpgtools/api/assuredworkloads/samples/split_billing_partner_workload.yaml 
b/tpgtools/api/assuredworkloads/samples/split_billing_partner_workload.yaml new file mode 100644 index 000000000000..92533710e151 --- /dev/null +++ b/tpgtools/api/assuredworkloads/samples/split_billing_partner_workload.yaml @@ -0,0 +1,28 @@ +# Copyright 2024 Google LLC. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: split_billing_partner_workload +description: A Split billing partner test of the assuredworkloads api +type: workload +versions: +- beta +resource: samples/split_billing_partner.workload.json +variables: +- name: billing_account + type: billing_account +- name: display + type: resource_name +- name: org_id + type: org_id +- name: ring + type: resource_name \ No newline at end of file diff --git a/tpgtools/go.mod b/tpgtools/go.mod index c5276bae889e..6e2919cc52ff 100644 --- a/tpgtools/go.mod +++ b/tpgtools/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( bitbucket.org/creachadair/stringset v0.0.11 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 github.com/golang/glog v1.1.2 github.com/hashicorp/hcl v1.0.0 github.com/kylelemons/godebug v1.1.0 diff --git a/tpgtools/go.sum b/tpgtools/go.sum index a55d3d4bdb04..d48d0baafa4c 100644 --- a/tpgtools/go.sum +++ b/tpgtools/go.sum @@ -6,8 +6,8 @@ cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdi cloud.google.com/go/compute/metadata v0.2.3 
h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 h1:vRKCLiR3faPmXAoqSdwXLv28/kygggzaKXzgdm6GXhg= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 h1:VodSRLhOrb8hhRbPre275EreP4vTiaejdBcvd2MCtX4= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/tpgtools/overrides/assuredworkloads/samples/workload/meta.yaml b/tpgtools/overrides/assuredworkloads/samples/workload/meta.yaml index 5699399c2d57..7911ffc518bf 100644 --- a/tpgtools/overrides/assuredworkloads/samples/workload/meta.yaml +++ b/tpgtools/overrides/assuredworkloads/samples/workload/meta.yaml @@ -6,6 +6,7 @@ ignore_read: - "kms_settings" - "resource_settings" - "provisioned_resources_parent" + - "partner_services_billing_account" doc_hide: - basic.tf.tmpl # basic_update.tf.tmpl auto hides - full.tf.tmpl From 8d436da792aa7feeca36231d6ba863150d517845 Mon Sep 17 00:00:00 2001 From: Benjamin Kaplan <58792807+bskaplan@users.noreply.github.com> Date: Tue, 3 Sep 2024 16:04:38 -0700 Subject: [PATCH 24/60] feat: support nfs and gcs in cloudrun and cloudrunv2 GA provider (#11503) Co-authored-by: Benjamin Kaplan --- mmv1/products/cloudrun/Service.yaml | 8 +- mmv1/products/cloudrunv2/Job.yaml | 6 +- 
mmv1/products/cloudrunv2/Service.yaml | 18 ++-- .../cloudrunv2_service_mount_gcs.tf.erb | 2 +- .../cloudrunv2_service_mount_nfs.tf.erb | 1 - .../resource_cloud_run_service_test.go.erb | 87 ++++++++++++++----- .../resource_cloud_run_v2_job_test.go.erb | 15 +++- .../resource_cloud_run_v2_service_test.go.erb | 4 +- 8 files changed, 91 insertions(+), 50 deletions(-) diff --git a/mmv1/products/cloudrun/Service.yaml b/mmv1/products/cloudrun/Service.yaml index 993c12f4c2c0..2a277edcd829 100644 --- a/mmv1/products/cloudrun/Service.yaml +++ b/mmv1/products/cloudrun/Service.yaml @@ -851,7 +851,6 @@ properties: name: csi description: |- A filesystem specified by the Container Storage Interface (CSI). - min_version: beta properties: - !ruby/object:Api::Type::String name: 'driver' @@ -859,8 +858,7 @@ properties: description: |- Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" - !ruby/object:Api::Type::Boolean name: 'readOnly' default_from_api: true @@ -876,9 +874,7 @@ properties: name: nfs description: |- A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
- min_version: beta + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" properties: - !ruby/object:Api::Type::String name: server diff --git a/mmv1/products/cloudrunv2/Job.yaml b/mmv1/products/cloudrunv2/Job.yaml index 6a18f36ab0f3..8159e8621117 100644 --- a/mmv1/products/cloudrunv2/Job.yaml +++ b/mmv1/products/cloudrunv2/Job.yaml @@ -535,8 +535,7 @@ properties: - !ruby/object:Api::Type::NestedObject name: 'gcs' description: |- - Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. - min_version: beta + Cloud Storage bucket mounted as a volume using GCSFuse. # exactly_one_of: # - template.0.volumes.0.secret # - template.0.volumes.0.cloudSqlInstance @@ -556,8 +555,7 @@ properties: - !ruby/object:Api::Type::NestedObject name: 'nfs' description: |- - NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. - min_version: beta + NFS share mounted as a volume. # exactly_one_of: # - template.0.volumes.0.secret # - template.0.volumes.0.cloudSqlInstance diff --git a/mmv1/products/cloudrunv2/Service.yaml b/mmv1/products/cloudrunv2/Service.yaml index 8cb2b9e10cb5..d6c36d475bbe 100644 --- a/mmv1/products/cloudrunv2/Service.yaml +++ b/mmv1/products/cloudrunv2/Service.yaml @@ -670,10 +670,10 @@ properties: HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified. 
send_empty_value: true allow_empty_object: true - # exactly_one_of: - # - template.0.containers.0.startupProbe.0.httpGet - # - template.0.containers.0.startupProbe.0.tcpSocket - # - template.0.containers.0.startupProbe.0.grpc + # exactly_one_of: + # - template.0.containers.0.startupProbe.0.httpGet + # - template.0.containers.0.startupProbe.0.tcpSocket + # - template.0.containers.0.startupProbe.0.grpc properties: - !ruby/object:Api::Type::String name: 'path' @@ -709,10 +709,10 @@ properties: TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified. send_empty_value: true allow_empty_object: true - # exactly_one_of: - # - template.0.containers.0.startupProbe.0.httpGet - # - template.0.containers.0.startupProbe.0.tcpSocket - # - template.0.containers.0.startupProbe.0.grpc + # exactly_one_of: + # - template.0.containers.0.startupProbe.0.httpGet + # - template.0.containers.0.startupProbe.0.tcpSocket + # - template.0.containers.0.startupProbe.0.grpc properties: - !ruby/object:Api::Type::Integer name: port @@ -837,7 +837,7 @@ properties: - !ruby/object:Api::Type::NestedObject name: 'gcs' description: |- - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. 
# exactly_one_of: # - template.0.volumes.0.secret # - template.0.volumes.0.cloudSqlInstance diff --git a/mmv1/templates/terraform/examples/cloudrunv2_service_mount_gcs.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_service_mount_gcs.tf.erb index 396dd3a845af..6742950f553f 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_service_mount_gcs.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_service_mount_gcs.tf.erb @@ -3,7 +3,7 @@ resource "google_cloud_run_v2_service" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" deletion_protection = false - launch_stage = "BETA" + template { execution_environment = "EXECUTION_ENVIRONMENT_GEN2" diff --git a/mmv1/templates/terraform/examples/cloudrunv2_service_mount_nfs.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_service_mount_nfs.tf.erb index e8c684382c97..302689328e38 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_service_mount_nfs.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_service_mount_nfs.tf.erb @@ -4,7 +4,6 @@ resource "google_cloud_run_v2_service" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" deletion_protection = false ingress = "INGRESS_TRAFFIC_ALL" - launch_stage = "BETA" template { execution_environment = "EXECUTION_ENVIRONMENT_GEN2" diff --git a/mmv1/third_party/terraform/services/cloudrun/resource_cloud_run_service_test.go.erb b/mmv1/third_party/terraform/services/cloudrun/resource_cloud_run_service_test.go.erb index a94d36b1b336..dd99e80cb7db 100644 --- a/mmv1/third_party/terraform/services/cloudrun/resource_cloud_run_service_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudrun/resource_cloud_run_service_test.go.erb @@ -1313,7 +1313,6 @@ resource "google_cloud_run_service" "default" { `, context) } -<% unless version == 'ga' -%> func TestAccCloudRunService_csiVolume(t *testing.T) { acctest.SkipIfVcr(t) @@ -1324,10 +1323,10 @@ func TestAccCloudRunService_csiVolume(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ 
PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project), + Config: testAccCloudRunService_cloudRunServiceWithNoVolume(name, project), }, { ResourceName: "google_cloud_run_service.default", @@ -1349,10 +1348,9 @@ func TestAccCloudRunService_csiVolume(t *testing.T) { } -func testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project string) string { +func testAccCloudRunService_cloudRunServiceWithNoVolume(name, project string) string { return fmt.Sprintf(` resource "google_cloud_run_service" "default" { - provider = google-beta name = "%s" location = "us-central1" @@ -1360,7 +1358,6 @@ resource "google_cloud_run_service" "default" { namespace = "%s" annotations = { generated-by = "magic-modules" - "run.googleapis.com/launch-stage" = "BETA" } } @@ -1368,14 +1365,6 @@ resource "google_cloud_run_service" "default" { spec { containers { image = "gcr.io/cloudrun/hello" - volume_mounts { - name = "vol1" - mount_path = "/mnt/vol1" - } - } - volumes { - name = "vol1" - empty_dir { size_limit = "256Mi" } } } } @@ -1389,10 +1378,10 @@ resource "google_cloud_run_service" "default" { `, name, project) } + func testAccCloudRunService_cloudRunServiceUpdateWithGcsVolume(name, project string) string { return fmt.Sprintf(` resource "google_cloud_run_service" "default" { - provider = google-beta name = "%s" location = "us-central1" @@ -1400,7 +1389,6 @@ resource "google_cloud_run_service" "default" { namespace = "%s" annotations = { generated-by = "magic-modules" - "run.googleapis.com/launch-stage" = "BETA" } } @@ -1431,13 +1419,68 @@ resource "google_cloud_run_service" "default" { } } - lifecycle { - ignore_changes = [ - metadata.0.annotations, - ] - } } `, name, project) } -<% end -%> + <% unless version == 'ga' -%> + +func 
TestAccCloudRunService_emptyDirVolume(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) + } + + +func testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + provider = google-beta + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + "run.googleapis.com/launch-stage" = "BETA" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + volume_mounts { + name = "vol1" + mount_path = "/mnt/vol1" + } + } + volumes { + name = "vol1" + empty_dir { size_limit = "256Mi" } + } + } + } + +} +`, name, project) +} + <% end -%> diff --git a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb index 114ed5e58652..2aab4e925374 100644 --- a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb @@ -309,7 +309,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithDirectVPCAndNamedBinAuthPolicyUpdate( `, context) } -<% unless version == 'ga' -%> func TestAccCloudRunV2Job_cloudrunv2JobWithGcsUpdate(t *testing.T) 
{ acctest.SkipIfVcr(t) t.Parallel() @@ -352,7 +351,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithNoVolume(context map[string]interface name = "%{job_name}" location = "us-central1" deletion_protection = false - launch_stage = "BETA" template { template { containers { @@ -376,7 +374,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithGcsVolume(context map[string]interfac name = "%{job_name}" location = "us-central1" deletion_protection = false - launch_stage = "BETA" template { template { containers { @@ -395,6 +392,11 @@ func testAccCloudRunV2Job_cloudrunv2JobWithGcsVolume(context map[string]interfac } } } + lifecycle { + ignore_changes = [ + launch_stage, + ] + } } `, context) } @@ -441,7 +443,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithNfsVolume(context map[string]interfac name = "%{job_name}" location = "us-central1" deletion_protection = false - launch_stage = "BETA" template { template { containers { @@ -461,10 +462,16 @@ func testAccCloudRunV2Job_cloudrunv2JobWithNfsVolume(context map[string]interfac } } } + lifecycle { + ignore_changes = [ + launch_stage, + ] + } } `, context) } +<% unless version == 'ga' -%> func TestAccCloudRunV2Job_cloudrunv2JobWithStartExecutionTokenUpdate(t *testing.T) { t.Parallel() diff --git a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb index 55ebef510c74..3e5cce0ddfe6 100644 --- a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_service_test.go.erb @@ -210,7 +210,6 @@ resource "google_compute_network" "custom_test" { } `, context) } -<% unless version == 'ga' -%> func TestAccCloudRunV2Service_cloudrunv2ServiceGcsVolume(t *testing.T) { acctest.SkipIfVcr(t) t.Parallel() @@ -244,7 +243,7 @@ resource "google_cloud_run_v2_service" "default" { description = "description creating" location = 
"us-central1" deletion_protection = false - launch_stage = "BETA" + annotations = { generated-by = "magic-modules" } @@ -313,7 +312,6 @@ resource "google_service_account" "service_account" { } `, context) } -<%end -%> func TestAccCloudRunV2Service_cloudrunv2ServiceTCPProbesUpdate(t *testing.T) { t.Parallel() From 1ed208786c29553f2ca18bc40fde6859b55f3f9d Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Wed, 4 Sep 2024 01:29:28 +0200 Subject: [PATCH 25/60] Add certificate manager certificates datasource (#11543) --- .../certificatemanager/Certificate.yaml | 6 + .../provider/provider_mmv1_resources.go.erb | 1 + ...google_certificate_manager_certificates.go | 138 ++++++ ...e_certificate_manager_certificates_test.go | 405 ++++++++++++++++++ .../tpgresource/datasource_helpers.go | 6 + .../terraform/transport/config.go.erb | 15 + ...ificate_manager_certificates.html.markdown | 36 ++ 7 files changed, 607 insertions(+) create mode 100644 mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates.go create mode 100644 mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/certificate_manager_certificates.html.markdown diff --git a/mmv1/products/certificatemanager/Certificate.yaml b/mmv1/products/certificatemanager/Certificate.yaml index 119379747fd0..8cc6cee712f6 100644 --- a/mmv1/products/certificatemanager/Certificate.yaml +++ b/mmv1/products/certificatemanager/Certificate.yaml @@ -138,6 +138,12 @@ properties: See https://cloud.google.com/compute/docs/regions-zones default_value: DEFAULT diff_suppress_func: 'certManagerDefaultScopeDiffSuppress' + - !ruby/object:Api::Type::Array + name: sanDnsnames + output: true + description: | + The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + item_type: Api::Type::String - !ruby/object:Api::Type::NestedObject 
name: selfManaged immutable: true diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index b3731e8c3124..561b463593d3 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -44,6 +44,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_billing_account": billing.DataSourceGoogleBillingAccount(), "google_bigquery_dataset": bigquery.DataSourceGoogleBigqueryDataset(), "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), + "google_certificate_manager_certificates": certificatemanager.DataSourceGoogleCertificateManagerCertificates(), "google_certificate_manager_certificate_map": certificatemanager.DataSourceGoogleCertificateManagerCertificateMap(), "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), "google_cloudfunctions_function": cloudfunctions.DataSourceGoogleCloudFunctionsFunction(), diff --git a/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates.go b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates.go new file mode 100644 index 000000000000..4af89936d94c --- /dev/null +++ b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates.go @@ -0,0 +1,138 @@ +package certificatemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/certificatemanager/v1" +) + +func DataSourceGoogleCertificateManagerCertificates() *schema.Resource { + dsSchema := 
tpgresource.DatasourceSchemaFromResourceSchema(ResourceCertificateManagerCertificate().Schema) + tpgresource.DeleteFieldsFromSchema(dsSchema, "self_managed") + + return &schema.Resource{ + Read: dataSourceGoogleCertificateManagerCertificatesRead, + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Optional: true, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Default: "global", + }, + "certificates": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + }, + } +} + +func dataSourceGoogleCertificateManagerCertificatesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("error fetching project for certificate: %s", err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return fmt.Errorf("error fetching region for certificate: %s", err) + } + + filter := d.Get("filter").(string) + + certificates := make([]map[string]interface{}, 0) + certificatesList, err := config.NewCertificateManagerClient(userAgent).Projects.Locations.Certificates.List(fmt.Sprintf("projects/%s/locations/%s", project, region)).Filter(filter).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Certificates : %s %s", project, region)) + } + + for _, certificate := range certificatesList.Certificates { + if certificate != nil { + certificates = append(certificates, map[string]interface{}{ + "name": certificate.Name, + "description": certificate.Description, + "labels": certificate.Labels, + "location": region, + "managed": flattenCertificateManaged(certificate.Managed), + "san_dnsnames": certificate.SanDnsnames, + "scope": certificate.Scope, + }) + } + } + + if err := d.Set("certificates", certificates); 
err != nil { + return fmt.Errorf("error setting certificates: %s", err) + } + + d.SetId(fmt.Sprintf( + "projects/%s/locations/%s/certificates", + project, + region, + )) + + return nil +} + +func flattenCertificateManaged(v *certificatemanager.ManagedCertificate) interface{} { + if v == nil { + return nil + } + + output := make(map[string]interface{}) + + output["authorization_attempt_info"] = flattenCertificateManagedAuthorizationAttemptInfo(v.AuthorizationAttemptInfo) + output["dns_authorizations"] = v.DnsAuthorizations + output["domains"] = v.Domains + output["issuance_config"] = v.IssuanceConfig + output["state"] = v.State + output["provisioning_issue"] = flattenCertificateManagedProvisioningIssue(v.ProvisioningIssue) + + return []interface{}{output} +} + +func flattenCertificateManagedAuthorizationAttemptInfo(v []*certificatemanager.AuthorizationAttemptInfo) interface{} { + if v == nil { + return nil + } + + output := make([]interface{}, 0, len(v)) + + for _, authorizationAttemptInfo := range v { + output = append(output, map[string]interface{}{ + "details": authorizationAttemptInfo.Details, + "domain": authorizationAttemptInfo.Domain, + "failure_reason": authorizationAttemptInfo.FailureReason, + "state": authorizationAttemptInfo.State, + }) + } + + return output +} + +func flattenCertificateManagedProvisioningIssue(v *certificatemanager.ProvisioningIssue) interface{} { + if v == nil { + return nil + } + + output := make(map[string]interface{}) + + output["details"] = v.Details + output["reason"] = v.Reason + + return []interface{}{output} +} diff --git a/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates_test.go b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates_test.go new file mode 100644 index 000000000000..63b9e34da98d --- /dev/null +++ 
b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificates_test.go @@ -0,0 +1,405 @@ +package certificatemanager_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccDataSourceGoogleCertificateManagerCertificates_basic(t *testing.T) { + t.Parallel() + + // Resource identifier used for content testing + name := fmt.Sprintf("tf-test-certificate-%d", acctest.RandInt(t)) + description := "My acceptance data source test certificates" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificates_basic(name, description), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.#", regexp.MustCompile("^[1-9]")), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "region"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "region", "global"), + ), + }, + }, + }) +} + +func TestAccDataSourceGoogleCertificateManagerCertificates_full(t *testing.T) { + t.Parallel() + + // Resource identifier used for content testing + region := "global" + id := fmt.Sprintf("projects/%s/locations/%s/certificates", envvar.GetTestProjectFromEnv(), region) + name := fmt.Sprintf("tf-test-certificate-%d", acctest.RandInt(t)) + description := "My acceptance data source test certificates" + certificateName := fmt.Sprintf("projects/%s/locations/%s/certificates/%s", envvar.GetTestProjectFromEnv(), region, name) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificates_full(name, description), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.#", regexp.MustCompile("^[1-9]")), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "id"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "id", id), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "region"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "region", region), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.name"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.name", certificateName), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.description"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.description", description), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.labels.%"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.labels.%", "3"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.labels.terraform", "true"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.labels.acc-test", "true"), + ), + }, + }, + }) +} + +func TestAccDataSourceGoogleCertificateManagerCertificates_regionBasic(t 
*testing.T) { + t.Parallel() + + // Resource identifier used for content testing + region := envvar.GetTestRegionFromEnv() + id := fmt.Sprintf("projects/%s/locations/%s/certificates", envvar.GetTestProjectFromEnv(), region) + name := fmt.Sprintf("tf-test-certificate-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificates_regionBasic(name, region), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.#", regexp.MustCompile("^[1-9]")), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "id"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "id", id), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "region"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "region", region), + ), + }, + }, + }) +} + +func TestAccDataSourceGoogleCertificateManagerCertificates_managedCertificate(t *testing.T) { + t.Parallel() + + // Resource identifier used for content testing + region := "global" + id := fmt.Sprintf("projects/%s/locations/%s/certificates", envvar.GetTestProjectFromEnv(), region) + name := fmt.Sprintf("tf-test-certificate-%d", acctest.RandInt(t)) + certificateName := fmt.Sprintf("projects/%s/locations/%s/certificates/%s", envvar.GetTestProjectFromEnv(), region, name) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificates_managedCertificateBasic(name), + Check: 
resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.#", regexp.MustCompile("^[1-9]")), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "id"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "id", id), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "region"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "region", region), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.name"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.name", certificateName), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.scope"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.scope", "EDGE_CACHE"), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.#"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.domains.#", "1"), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.state"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.state", "PROVISIONING"), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.authorization_attempt_info.#"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.authorization_attempt_info.0.details", ""), + 
resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.authorization_attempt_info.0.domain", "terraform.subdomain1.com"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.authorization_attempt_info.0.failure_reason", ""), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.authorization_attempt_info.0.state", "AUTHORIZING"), + ), + }, + }, + }) +} + +func TestAccDataSourceGoogleCertificateManagerCertificates_managedCertificateDNSAuthorization(t *testing.T) { + t.Parallel() + + // Resource identifier used for content testing + region := "global" + id := fmt.Sprintf("projects/%s/locations/%s/certificates", envvar.GetTestProjectFromEnv(), region) + name := fmt.Sprintf("tf-test-certificate-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificates_managedCertificateDNSAuthorization(name), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.#", regexp.MustCompile("^[1-9]")), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "id"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "id", id), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.dns_authorizations.#"), + ), + }, + }, + }) +} + +func TestAccDataSourceGoogleCertificateManagerCertificates_managedCertificateIssuerConfig(t *testing.T) { + t.Parallel() + + // Resource identifier used for content testing + region := "global" + id := 
fmt.Sprintf("projects/%s/locations/%s/certificates", envvar.GetTestProjectFromEnv(), region) + name := fmt.Sprintf("tf-test-certificate-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificates_managedCertificateIssuerConfig(name), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_certificate_manager_certificates.certificates", "certificates.#", regexp.MustCompile("^[1-9]")), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "id"), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificates.certificates", "id", id), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.issuance_config"), + + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.provisioning_issue.#"), + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.provisioning_issue.0.details"), + resource.TestCheckResourceAttrSet("data.google_certificate_manager_certificates.certificates", "certificates.0.managed.0.provisioning_issue.0.reason"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleCertificateManagerCertificates_basic(certificateName, certificateDescription string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate" "default" { + name = "%s" + description = "%s" + self_managed { + pem_certificate = file("test-fixtures/cert.pem") + pem_private_key = file("test-fixtures/private-key.pem") + } + + labels = { + "terraform" : true, + "acc-test" : true, + } +} + +data "google_certificate_manager_certificates" "certificates" { + depends_on = 
[google_certificate_manager_certificate.default] +} +`, certificateName, certificateDescription) +} + +func testAccDataSourceGoogleCertificateManagerCertificates_full(certificateName, certificateDescription string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate" "default" { + name = "%s" + description = "%s" + self_managed { + pem_certificate = file("test-fixtures/cert.pem") + pem_private_key = file("test-fixtures/private-key.pem") + } + + labels = { + "terraform" : true, + "acc-test" : true, + } +} + +data "google_certificate_manager_certificates" "certificates" { + filter = "name:${google_certificate_manager_certificate.default.id}" + depends_on = [google_certificate_manager_certificate.default] +} +`, certificateName, certificateDescription) +} + +func testAccDataSourceGoogleCertificateManagerCertificates_regionBasic(certificateName, region string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate" "default" { + name = "%s" + location = "%s" + self_managed { + pem_certificate = file("test-fixtures/cert.pem") + pem_private_key = file("test-fixtures/private-key.pem") + } + + labels = { + "terraform" : true, + "acc-test" : true, + } +} + +data "google_certificate_manager_certificates" "certificates" { + filter = "name:${google_certificate_manager_certificate.default.id}" + region = "%s" + depends_on = [google_certificate_manager_certificate.default] +} +`, certificateName, region, region) +} + +func testAccDataSourceGoogleCertificateManagerCertificates_managedCertificateBasic(certificateName string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate" "default" { + name = "%s" + scope = "EDGE_CACHE" + managed { + domains = [ + "terraform.subdomain1.com" + ] + } +} + +data "google_certificate_manager_certificates" "certificates" { + filter = "name:${google_certificate_manager_certificate.default.id}" + depends_on = [google_certificate_manager_certificate.default] +} +`, 
certificateName) +} + +func testAccDataSourceGoogleCertificateManagerCertificates_managedCertificateDNSAuthorization(certificateName string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate" "default" { + name = "%s" + scope = "EDGE_CACHE" + managed { + domains = [ + google_certificate_manager_dns_authorization.default.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.default.id + ] + } +} + +resource "google_certificate_manager_dns_authorization" "default" { + name = "%s" + domain = "terraform.subdomain1.com" +} + +data "google_certificate_manager_certificates" "certificates" { + filter = "name:${google_certificate_manager_certificate.default.id}" + depends_on = [google_certificate_manager_certificate.default] +} +`, certificateName, certificateName) +} + +func testAccDataSourceGoogleCertificateManagerCertificates_managedCertificateIssuerConfig(id string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate" "default" { + name = "%s" + scope = "EDGE_CACHE" + managed { + domains = [ + "terraform.subdomain1.com" + ] + issuance_config = google_certificate_manager_certificate_issuance_config.issuanceconfig.id + } +} + + +# creating certificate_issuance_config to use it in the managed certificate +resource "google_certificate_manager_certificate_issuance_config" "issuanceconfig" { + name = "%s" + description = "sample description for the certificate issuanceConfigs" + certificate_authority_config { + certificate_authority_service_config { + ca_pool = google_privateca_ca_pool.pool.id + } + } + lifetime = "1814400s" + rotation_window_percentage = 34 + key_algorithm = "ECDSA_P256" + depends_on = [google_privateca_certificate_authority.ca_authority] +} + +resource "google_privateca_ca_pool" "pool" { + name = "%s" + location = "us-central1" + tier = "ENTERPRISE" +} + +resource "google_privateca_certificate_authority" "ca_authority" { + location = "us-central1" + pool = 
google_privateca_ca_pool.pool.name + certificate_authority_id = "%s" + config { + subject_config { + subject { + organization = "HashiCorp" + common_name = "my-certificate-authority" + } + subject_alt_name { + dns_names = ["hashicorp.com"] + } + } + x509_config { + ca_options { + is_ca = true + } + key_usage { + base_key_usage { + cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = true + } + } + } + } + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } + + // Disable CA deletion related safe checks for easier cleanup. + deletion_protection = false + skip_grace_period = true + ignore_active_certificates_on_deletion = true +} + +data "google_certificate_manager_certificates" "certificates" { + filter = "name:${google_certificate_manager_certificate.default.id}" + depends_on = [google_certificate_manager_certificate.default] +} +`, id, id, id, id) +} diff --git a/mmv1/third_party/terraform/tpgresource/datasource_helpers.go b/mmv1/third_party/terraform/tpgresource/datasource_helpers.go index 9584bea8b9d3..ba913bbcdb2a 100644 --- a/mmv1/third_party/terraform/tpgresource/datasource_helpers.go +++ b/mmv1/third_party/terraform/tpgresource/datasource_helpers.go @@ -71,3 +71,9 @@ func AddRequiredFieldsToSchema(schema map[string]*schema.Schema, keys ...string) func AddOptionalFieldsToSchema(schema map[string]*schema.Schema, keys ...string) { FixDatasourceSchemaFlags(schema, false, keys...) 
} + +func DeleteFieldsFromSchema(schema map[string]*schema.Schema, keys ...string) { + for _, key := range keys { + delete(schema, key) + } +} diff --git a/mmv1/third_party/terraform/transport/config.go.erb b/mmv1/third_party/terraform/transport/config.go.erb index 821ff6cdef08..608c51d57778 100644 --- a/mmv1/third_party/terraform/transport/config.go.erb +++ b/mmv1/third_party/terraform/transport/config.go.erb @@ -34,6 +34,7 @@ import ( appengine "google.golang.org/api/appengine/v1" "google.golang.org/api/bigquery/v2" "google.golang.org/api/bigtableadmin/v2" + "google.golang.org/api/certificatemanager/v1" "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudbuild/v1" <% unless version == 'ga' -%> @@ -604,6 +605,20 @@ func (c *Config) getTokenSource(clientScopes []string, initialCredentialsOnly bo // while most only want the host URL, some older ones also want the version and some // of those "projects" as well. You can find out if this is required by looking at // the basePath value in the client library file. 
+func (c *Config) NewCertificateManagerClient(userAgent string) *certificatemanager.Service { + certificateManagerClientBasePath := RemoveBasePathVersion(c.CertificateManagerBasePath) + log.Printf("[INFO] Instantiating Certificate Manager client for path %s", certificateManagerClientBasePath) + clientCertificateManager, err := certificatemanager.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client certificate manager: %s", err) + return nil + } + clientCertificateManager.UserAgent = userAgent + clientCertificateManager.BasePath = certificateManagerClientBasePath + + return clientCertificateManager +} + func (c *Config) NewComputeClient(userAgent string) *compute.Service { log.Printf("[INFO] Instantiating GCE client for path %s", c.ComputeBasePath) clientCompute, err := compute.NewService(c.Context, option.WithHTTPClient(c.Client)) diff --git a/mmv1/third_party/terraform/website/docs/d/certificate_manager_certificates.html.markdown b/mmv1/third_party/terraform/website/docs/d/certificate_manager_certificates.html.markdown new file mode 100644 index 000000000000..d2199700a56f --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/certificate_manager_certificates.html.markdown @@ -0,0 +1,36 @@ +--- +subcategory: "Certificate manager" +description: |- + List all certificates within a project and region. +--- +# google_certificate_manager_certificates + +List all certificates within Google Certificate Manager for a given project, region or filter. + +## Example Usage + +```tf +data "google_certificate_manager_certificates" "default" { +} +``` + +## Example Usage - with a filter + +```tf +data "google_certificate_manager_certificates" "default" { + filter = "name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `filter` - (Optional) Filter expression to restrict the certificates returned. 
+* `project` - (Optional) The ID of the project in which the resource belongs. If it + is not provided, the provider project is used. +* `region` - (Optional) The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + +## Attributes Reference + +See [google_certificate_manager_certificate](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/certificate_manager_certificate) resource for details of the available attributes. From ba356a39c6c948916aab77da2dc0cab22b95d70a Mon Sep 17 00:00:00 2001 From: Alessio Buraggina <28165200+tdbhacks@users.noreply.github.com> Date: Wed, 4 Sep 2024 11:41:49 -0400 Subject: [PATCH 26/60] Support full resource names in autokey_config.folder (#11413) Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com> --- mmv1/products/kms/AutokeyConfig.yaml | 8 ++ .../autokey_config_folder_diff.go.erb | 4 + .../go/autokey_config_folder_diff.go.tmpl | 4 + .../kms_autokey_config.go.erb | 1 + .../examples/kms_autokey_config_all.tf.erb | 9 +- .../kms_autokey_config_folder.go.erb | 2 + .../kms_autokey_config_folder.go.erb | 4 + .../kms_autokey_config_folder.go.erb | 1 + .../pre_read/kms_autokey_config_folder.go.erb | 1 + .../kms_autokey_config_folder.go.erb | 1 + .../resource_kms_autokey_config_sweeper.go | 124 ++++++++++++++++++ 11 files changed, 158 insertions(+), 1 deletion(-) create mode 100644 mmv1/templates/terraform/constants/autokey_config_folder_diff.go.erb create mode 100644 mmv1/templates/terraform/constants/go/autokey_config_folder_diff.go.tmpl create mode 100644 mmv1/templates/terraform/post_create/kms_autokey_config_folder.go.erb create mode 100644 mmv1/templates/terraform/pre_create/kms_autokey_config_folder.go.erb create mode 100644 mmv1/templates/terraform/pre_delete/kms_autokey_config_folder.go.erb create mode 100644 mmv1/templates/terraform/pre_read/kms_autokey_config_folder.go.erb create mode 100644 
mmv1/templates/terraform/pre_update/kms_autokey_config_folder.go.erb create mode 100644 mmv1/third_party/terraform/services/kms/resource_kms_autokey_config_sweeper.go diff --git a/mmv1/products/kms/AutokeyConfig.yaml b/mmv1/products/kms/AutokeyConfig.yaml index 85e2e405cfa5..531b94e18744 100644 --- a/mmv1/products/kms/AutokeyConfig.yaml +++ b/mmv1/products/kms/AutokeyConfig.yaml @@ -39,7 +39,14 @@ references: !ruby/object:Api::Resource::ReferenceLinks id_format: 'folders/{{folder}}/autokeyConfig' import_format: ['folders/{{folder}}/autokeyConfig'] min_version: beta +# Using a handwritten sweeper because of pre_delete. +skip_sweeper: true custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/autokey_config_folder_diff.go.erb + pre_create: templates/terraform/pre_create/kms_autokey_config_folder.go.erb + pre_delete: templates/terraform/pre_delete/kms_autokey_config_folder.go.erb + pre_read: templates/terraform/pre_read/kms_autokey_config_folder.go.erb + pre_update: templates/terraform/pre_update/kms_autokey_config_folder.go.erb test_check_destroy: templates/terraform/custom_check_destroy/kms_autokey_config.go.erb examples: - !ruby/object:Provider::Terraform::Examples @@ -65,6 +72,7 @@ parameters: required: true immutable: true url_param_only: true + diff_suppress_func: 'folderPrefixSuppress' description: | The folder for which to retrieve config. 
properties: diff --git a/mmv1/templates/terraform/constants/autokey_config_folder_diff.go.erb b/mmv1/templates/terraform/constants/autokey_config_folder_diff.go.erb new file mode 100644 index 000000000000..530fc23be125 --- /dev/null +++ b/mmv1/templates/terraform/constants/autokey_config_folder_diff.go.erb @@ -0,0 +1,4 @@ +func folderPrefixSuppress(_, old, new string, d *schema.ResourceData) bool { + prefix := "folders/" + return prefix+old == new || prefix+new == old +} diff --git a/mmv1/templates/terraform/constants/go/autokey_config_folder_diff.go.tmpl b/mmv1/templates/terraform/constants/go/autokey_config_folder_diff.go.tmpl new file mode 100644 index 000000000000..530fc23be125 --- /dev/null +++ b/mmv1/templates/terraform/constants/go/autokey_config_folder_diff.go.tmpl @@ -0,0 +1,4 @@ +func folderPrefixSuppress(_, old, new string, d *schema.ResourceData) bool { + prefix := "folders/" + return prefix+old == new || prefix+new == old +} diff --git a/mmv1/templates/terraform/custom_check_destroy/kms_autokey_config.go.erb b/mmv1/templates/terraform/custom_check_destroy/kms_autokey_config.go.erb index 275a16ddd588..9add7b5818c5 100644 --- a/mmv1/templates/terraform/custom_check_destroy/kms_autokey_config.go.erb +++ b/mmv1/templates/terraform/custom_check_destroy/kms_autokey_config.go.erb @@ -1,6 +1,7 @@ config := acctest.GoogleProviderConfig(t) url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{KMSBasePath}}folders/{{folder}}/autokeyConfig") +url = strings.Replace(url, "folders/folders/", "folders/", 1) if err != nil { return err } diff --git a/mmv1/templates/terraform/examples/kms_autokey_config_all.tf.erb b/mmv1/templates/terraform/examples/kms_autokey_config_all.tf.erb index f72418bdba8c..26dc92ec97e5 100644 --- a/mmv1/templates/terraform/examples/kms_autokey_config_all.tf.erb +++ b/mmv1/templates/terraform/examples/kms_autokey_config_all.tf.erb @@ -64,7 +64,14 @@ resource "time_sleep" "wait_srv_acc_permissions" { resource "google_kms_autokey_config" "<%= 
ctx[:primary_resource_id] %>" { provider = google-beta - folder = google_folder.autokms_folder.folder_id + folder = google_folder.autokms_folder.id key_project = "projects/${google_project.key_project.project_id}" depends_on = [time_sleep.wait_srv_acc_permissions] } + +# Wait delay after setting AutokeyConfig, to prevent diffs on reapply, +# because setting the config takes a little to fully propagate. +resource "time_sleep" "wait_autokey_propagation" { + create_duration = "30s" + depends_on = [google_kms_autokey_config.<%= ctx[:primary_resource_id] %>] +} diff --git a/mmv1/templates/terraform/post_create/kms_autokey_config_folder.go.erb b/mmv1/templates/terraform/post_create/kms_autokey_config_folder.go.erb new file mode 100644 index 000000000000..7404b3c78e93 --- /dev/null +++ b/mmv1/templates/terraform/post_create/kms_autokey_config_folder.go.erb @@ -0,0 +1,2 @@ +id = strings.Replace(id, "folders/folders/", "folders/", 1) +d.SetId(id) diff --git a/mmv1/templates/terraform/pre_create/kms_autokey_config_folder.go.erb b/mmv1/templates/terraform/pre_create/kms_autokey_config_folder.go.erb new file mode 100644 index 000000000000..ffbef5e2eb69 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/kms_autokey_config_folder.go.erb @@ -0,0 +1,4 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) +folderValue := d.Get("folder").(string) +folderValue = strings.Replace(folderValue, "folders/", "", 1) +d.Set("folder", folderValue) diff --git a/mmv1/templates/terraform/pre_delete/kms_autokey_config_folder.go.erb b/mmv1/templates/terraform/pre_delete/kms_autokey_config_folder.go.erb new file mode 100644 index 000000000000..db8c46af1299 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/kms_autokey_config_folder.go.erb @@ -0,0 +1 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) diff --git a/mmv1/templates/terraform/pre_read/kms_autokey_config_folder.go.erb b/mmv1/templates/terraform/pre_read/kms_autokey_config_folder.go.erb new file mode 
100644 index 000000000000..db8c46af1299 --- /dev/null +++ b/mmv1/templates/terraform/pre_read/kms_autokey_config_folder.go.erb @@ -0,0 +1 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) diff --git a/mmv1/templates/terraform/pre_update/kms_autokey_config_folder.go.erb b/mmv1/templates/terraform/pre_update/kms_autokey_config_folder.go.erb new file mode 100644 index 000000000000..db8c46af1299 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/kms_autokey_config_folder.go.erb @@ -0,0 +1 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) diff --git a/mmv1/third_party/terraform/services/kms/resource_kms_autokey_config_sweeper.go b/mmv1/third_party/terraform/services/kms/resource_kms_autokey_config_sweeper.go new file mode 100644 index 000000000000..b1e102438a8d --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/resource_kms_autokey_config_sweeper.go @@ -0,0 +1,124 @@ +package kms + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("KMSAutokeyConfig", testSweepKMSAutokeyConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepKMSAutokeyConfig(region string) error { + resourceName := "KMSAutokeyConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := 
envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://cloudkms.googleapis.com/v1/folders/{{folder}}/autokeyConfig", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + listUrl = strings.Replace(listUrl, "folders/folders/", "folders/", 1) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["autokeyConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://cloudkms.googleapis.com/v1/folders/{{folder}}/autokeyConfig?updateMask=keyProject" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + deleteUrl = strings.Replace(deleteUrl, "folders/folders/", "folders/", 1) + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} From b3e10c032f80f42f5a53aab2f8c151cfcb465627 Mon Sep 17 00:00:00 2001 From: Dominykas Norkus Date: Wed, 4 Sep 2024 19:17:16 +0300 Subject: [PATCH 27/60] feat: Remove force replacement from gcfs_config (#11553) --- .../services/container/go/node_config.go.tmpl | 5 +- .../go/resource_container_cluster.go.tmpl | 2 +- .../resource_container_node_pool_test.go.tmpl | 60 ++++++++++--------- .../services/container/node_config.go.erb | 6 +- .../resource_container_cluster.go.erb | 2 +- .../resource_container_node_pool_test.go.erb | 60 ++++++++++--------- 6 files changed, 70 
insertions(+), 65 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl index c5e1425a1f0a..eded879ff7ee 100644 --- a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl @@ -99,13 +99,12 @@ func schemaLoggingVariant() *schema.Schema { } } -func schemaGcfsConfig(forceNew bool) *schema.Schema { +func schemaGcfsConfig() *schema.Schema { return &schema.Schema{ Type: schema.TypeList, Optional: true, MaxItems: 1, Description: `GCFS configuration for this node.`, - ForceNew: forceNew, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -339,7 +338,7 @@ func schemaNodeConfig() *schema.Schema { }, }, - "gcfs_config": schemaGcfsConfig(true), + "gcfs_config": schemaGcfsConfig(), "gvnic": { Type: schema.TypeList, diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl index 9707d9febc5b..7d43a21c88c1 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl @@ -159,7 +159,7 @@ func clusterSchemaNodePoolDefaults() *schema.Schema { Schema: map[string]*schema.Schema{ "containerd_config": schemaContainerdConfig(), {{- if ne $.TargetVersionName "ga" }} - "gcfs_config": schemaGcfsConfig(false), + "gcfs_config": schemaGcfsConfig(), {{- end }} "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), "logging_variant": schemaLoggingVariant(), diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl index 36a609075836..748e0cb13375 100644 --- 
a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl @@ -1718,7 +1718,11 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName), + Config: testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", + "node_config.0.gcfs_config.0.enabled", "true"), + ), }, { ResourceName: "google_container_node_pool.np", @@ -1729,7 +1733,7 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { }) } -func testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName string) string { +func testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName string, enabled bool) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" @@ -1750,11 +1754,11 @@ resource "google_container_node_pool" "np" { machine_type = "n1-standard-8" image_type = "COS_CONTAINERD" gcfs_config { - enabled = true + enabled = %t } } } -`, cluster, networkName, subnetworkName, np) +`, cluster, networkName, subnetworkName, np, enabled) } func TestAccContainerNodePool_gvnic(t *testing.T) { @@ -4734,30 +4738,30 @@ func TestAccContainerNodePool_privateRegistry(t *testing.T) { func testAccContainerNodePool_privateRegistryEnabled(secretID, cluster, nodepool, network, subnetwork string) string { return fmt.Sprintf(` -data "google_project" "test_project" { +data "google_project" "test_project" { + } + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } } +} -resource "google_secret_manager_secret" 
"secret-basic" { - secret_id = "%s" - replication { - user_managed { - replicas { - location = "us-central1" - } - } - } -} - -resource "google_secret_manager_secret_version" "secret-version-basic" { - secret = google_secret_manager_secret.secret-basic.id - secret_data = "dummypassword" - } - -resource "google_secret_manager_secret_iam_member" "secret_iam" { - secret_id = google_secret_manager_secret.secret-basic.id - role = "roles/secretmanager.admin" - member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" - depends_on = [google_secret_manager_secret_version.secret-version-basic] +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.id + secret_data = "dummypassword" + } + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret-basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret-version-basic] } resource "google_container_cluster" "cluster" { @@ -4768,13 +4772,13 @@ resource "google_container_cluster" "cluster" { network = "%s" subnetwork = "%s" } - + resource "google_container_node_pool" "np" { name = "%s" location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 1 - + node_config { oauth_scopes = [ "https://www.googleapis.com/auth/cloud-platform", diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index fd9feb10e467..3165f7c8bb79 100644 --- a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -100,19 +100,17 @@ func schemaLoggingVariant() *schema.Schema { } } -func schemaGcfsConfig(forceNew bool) 
*schema.Schema { +func schemaGcfsConfig() *schema.Schema { return &schema.Schema{ Type: schema.TypeList, Optional: true, MaxItems: 1, Description: `GCFS configuration for this node.`, - ForceNew: forceNew, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { Type: schema.TypeBool, Required: true, - ForceNew: forceNew, Description: `Whether or not GCFS is enabled`, }, }, @@ -340,7 +338,7 @@ func schemaNodeConfig() *schema.Schema { }, }, - "gcfs_config": schemaGcfsConfig(true), + "gcfs_config": schemaGcfsConfig(), "gvnic": { Type: schema.TypeList, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index 1957be85b5b2..0feb0b42e5ed 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -160,7 +160,7 @@ func clusterSchemaNodePoolDefaults() *schema.Schema { Schema: map[string]*schema.Schema{ "containerd_config": schemaContainerdConfig(), <% unless version == 'ga' -%> - "gcfs_config": schemaGcfsConfig(false), + "gcfs_config": schemaGcfsConfig(), <% end -%> "insecure_kubelet_readonly_port_enabled": schemaInsecureKubeletReadonlyPortEnabled(), "logging_variant": schemaLoggingVariant(), diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index 9b00517cf10e..794e0c90f1e7 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -1719,7 +1719,11 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: 
testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName), + Config: testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", + "node_config.0.gcfs_config.0.enabled", "true"), + ), }, { ResourceName: "google_container_node_pool.np", @@ -1730,7 +1734,7 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { }) } -func testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName string) string { +func testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName string, enabled bool) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" @@ -1751,11 +1755,11 @@ resource "google_container_node_pool" "np" { machine_type = "n1-standard-8" image_type = "COS_CONTAINERD" gcfs_config { - enabled = true + enabled = %t } } } -`, cluster, networkName, subnetworkName, np) +`, cluster, networkName, subnetworkName, np, enabled) } func TestAccContainerNodePool_gvnic(t *testing.T) { @@ -4735,30 +4739,30 @@ func TestAccContainerNodePool_privateRegistry(t *testing.T) { func testAccContainerNodePool_privateRegistryEnabled(secretID, cluster, nodepool, network, subnetwork string) string { return fmt.Sprintf(` -data "google_project" "test_project" { +data "google_project" "test_project" { + } + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } } +} -resource "google_secret_manager_secret" "secret-basic" { - secret_id = "%s" - replication { - user_managed { - replicas { - location = "us-central1" - } - } - } -} - -resource "google_secret_manager_secret_version" "secret-version-basic" { - secret = google_secret_manager_secret.secret-basic.id - secret_data = "dummypassword" - } - -resource "google_secret_manager_secret_iam_member" "secret_iam" { - secret_id = 
google_secret_manager_secret.secret-basic.id - role = "roles/secretmanager.admin" - member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" - depends_on = [google_secret_manager_secret_version.secret-version-basic] +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.id + secret_data = "dummypassword" + } + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret-basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret-version-basic] } resource "google_container_cluster" "cluster" { @@ -4769,13 +4773,13 @@ resource "google_container_cluster" "cluster" { network = "%s" subnetwork = "%s" } - + resource "google_container_node_pool" "np" { name = "%s" location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 1 - + node_config { oauth_scopes = [ "https://www.googleapis.com/auth/cloud-platform", From 293c3ca1b6f2de4ce5d731aa8f0c3a65969cc9ac Mon Sep 17 00:00:00 2001 From: harshithpatte-g Date: Thu, 5 Sep 2024 00:10:38 +0530 Subject: [PATCH 28/60] fix: Router advertised-route-priority undefined behavior (#11613) --- .../services/compute/resource_compute_router_peer.go.erb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb index 9d1ffa896257..e8c83a016d0e 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb @@ -420,7 +420,7 @@ func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta 
interface{} advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + } else if v, ok := d.GetOk("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { obj["advertisedRoutePriority"] = advertisedRoutePriorityProp } advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) @@ -773,7 +773,7 @@ func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{} advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + } else if v, ok := d.GetOk("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { obj["advertisedRoutePriority"] = advertisedRoutePriorityProp } advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) From 77949587e66c35205953f95535a126abf143aeb1 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Wed, 4 Sep 2024 11:51:49 -0700 Subject: [PATCH 29/60] Fixed potential panics during VCR tests (#11633) --- .ci/magician/cmd/test_terraform_vcr.go | 24 +++++----- .ci/magician/cmd/test_terraform_vcr_test.go | 42 +++++++++--------- .ci/magician/cmd/vcr_cassette_update.go | 4 +- .ci/magician/cmd/vcr_cassette_update_test.go | 18 ++++---- .ci/magician/vcr/tester.go | 46 ++++++++++---------- 5 files changed, 68 insertions(+), 66 deletions(-) diff --git a/.ci/magician/cmd/test_terraform_vcr.go b/.ci/magician/cmd/test_terraform_vcr.go index 3b976fece0d4..83d8106f26b8 100644 --- a/.ci/magician/cmd/test_terraform_vcr.go 
+++ b/.ci/magician/cmd/test_terraform_vcr.go @@ -56,7 +56,7 @@ var ttvEnvironmentVariables = [...]string{ } type analytics struct { - ReplayingResult *vcr.Result + ReplayingResult vcr.Result RunFullVCR bool AffectedServices []string } @@ -67,7 +67,7 @@ type nonExercisedTests struct { } type withReplayFailedTests struct { - ReplayingResult *vcr.Result + ReplayingResult vcr.Result } type withoutReplayFailedTests struct { @@ -77,8 +77,8 @@ type withoutReplayFailedTests struct { } type recordReplay struct { - RecordingResult *vcr.Result - ReplayingAfterRecordingResult *vcr.Result + RecordingResult vcr.Result + ReplayingAfterRecordingResult vcr.Result HasTerminatedTests bool RecordingErr error AllRecordingPassed bool @@ -265,7 +265,7 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, return nil } - var replayingAfterRecordingResult *vcr.Result + replayingAfterRecordingResult := vcr.Result{} var replayingAfterRecordingErr error if len(recordingResult.PassedTests) > 0 { replayingAfterRecordingResult, replayingAfterRecordingErr = vt.RunParallel(vcr.Replaying, provider.Beta, testDirs, recordingResult.PassedTests) @@ -323,7 +323,7 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, var addedTestsRegexp = regexp.MustCompile(`(?m)^\+func (Test\w+)\(t \*testing.T\) {`) -func notRunTests(gaDiff, betaDiff string, result *vcr.Result) ([]string, []string) { +func notRunTests(gaDiff, betaDiff string, result vcr.Result) ([]string, []string) { fmt.Println("Checking for new acceptance tests that were not run") addedGaTests := addedTestsRegexp.FindAllStringSubmatch(gaDiff, -1) addedBetaTests := addedTestsRegexp.FindAllStringSubmatch(betaDiff, -1) @@ -388,15 +388,15 @@ func modifiedPackages(changedFiles []string) (map[string]struct{}, bool) { return services, runFullVCR } -func runReplaying(runFullVCR bool, services map[string]struct{}, vt *vcr.Tester) (*vcr.Result, []string, error) { - var result *vcr.Result +func 
runReplaying(runFullVCR bool, services map[string]struct{}, vt *vcr.Tester) (vcr.Result, []string, error) { + result := vcr.Result{} var testDirs []string var replayingErr error if runFullVCR { - fmt.Println("run full VCR tests") + fmt.Println("runReplaying: full VCR tests") result, replayingErr = vt.Run(vcr.Replaying, provider.Beta, nil) } else if len(services) > 0 { - result = &vcr.Result{} + fmt.Printf("runReplaying: %d specific services: %v\n", len(services), services) for service := range services { servicePath := "./" + filepath.Join("google-beta", "services", service) testDirs = append(testDirs, servicePath) @@ -410,12 +410,14 @@ func runReplaying(runFullVCR bool, services map[string]struct{}, vt *vcr.Tester) result.FailedTests = append(result.FailedTests, serviceResult.FailedTests...) result.Panics = append(result.Panics, serviceResult.Panics...) } + } else { + fmt.Println("runReplaying: no impacted services") } return result, testDirs, replayingErr } -func handlePanics(prNumber, buildID, buildStatusTargetURL, mmCommitSha string, result *vcr.Result, mode vcr.Mode, gh GithubClient) (bool, error) { +func handlePanics(prNumber, buildID, buildStatusTargetURL, mmCommitSha string, result vcr.Result, mode vcr.Mode, gh GithubClient) (bool, error) { if len(result.Panics) > 0 { comment := fmt.Sprintf(`$\textcolor{red}{\textsf{The provider crashed while running the VCR tests in %s mode}}$ $\textcolor{red}{\textsf{Please fix it to complete your PR}}$ diff --git a/.ci/magician/cmd/test_terraform_vcr_test.go b/.ci/magician/cmd/test_terraform_vcr_test.go index 604ec2feb06c..393dd5057bb7 100644 --- a/.ci/magician/cmd/test_terraform_vcr_test.go +++ b/.ci/magician/cmd/test_terraform_vcr_test.go @@ -75,14 +75,14 @@ func TestModifiedPackagesFromDiffs(t *testing.T) { func TestNotRunTests(t *testing.T) { cases := map[string]struct { gaDiff, betaDiff string - result *vcr.Result + result vcr.Result wantNotRunBeta []string wantNotRunGa []string }{ "no diff": { gaDiff: "", betaDiff: 
"", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccTwo"}, }, @@ -92,7 +92,7 @@ func TestNotRunTests(t *testing.T) { "no added tests": { gaDiff: "+// some change", betaDiff: "+// some change", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccTwo"}, }, @@ -102,7 +102,7 @@ func TestNotRunTests(t *testing.T) { "test added and passed": { gaDiff: "+func TestAccTwo(t *testing.T) {", betaDiff: "+func TestAccTwo(t *testing.T) {", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccTwo"}, FailedTests: []string{}, }, @@ -114,7 +114,7 @@ func TestNotRunTests(t *testing.T) { +func TestAccThree(t *testing.T) {`, betaDiff: `+func TestAccTwo(t *testing.T) { +func TestAccThree(t *testing.T) {`, - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccTwo", "TestAccThree"}, FailedTests: []string{}, }, @@ -124,7 +124,7 @@ func TestNotRunTests(t *testing.T) { "test added and failed": { gaDiff: "+func TestAccTwo(t *testing.T) {", betaDiff: "+func TestAccTwo(t *testing.T) {", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{}, FailedTests: []string{"TestAccTwo"}, }, @@ -134,7 +134,7 @@ func TestNotRunTests(t *testing.T) { "tests removed and run": { gaDiff: "-func TestAccOne(t *testing.T) {", betaDiff: "-func TestAccTwo(t *testing.T) {", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccTwo"}, }, @@ -144,7 +144,7 @@ func TestNotRunTests(t *testing.T) { "test added and not run": { gaDiff: "+func TestAccThree(t *testing.T) {", betaDiff: "+func TestAccFour(t *testing.T) {", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccTwo"}, }, @@ -156,7 +156,7 @@ func TestNotRunTests(t *testing.T) { +func TestAccThree(t *testing.T) {`, betaDiff: `+func TestAccTwo(t *testing.T) { +func 
TestAccThree(t *testing.T) {`, - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccFour"}, }, @@ -166,7 +166,7 @@ func TestNotRunTests(t *testing.T) { "tests removed and not run": { gaDiff: "-func TestAccThree(t *testing.T) {", betaDiff: "-func TestAccFour(t *testing.T) {", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccTwo"}, }, @@ -176,7 +176,7 @@ func TestNotRunTests(t *testing.T) { "tests added but commented out": { gaDiff: "+//func TestAccThree(t *testing.T) {", betaDiff: "+//func TestAccFour(t *testing.T) {", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccTwo"}, }, @@ -189,7 +189,7 @@ func TestNotRunTests(t *testing.T) { +func TestAccCloudRunService_cloudRunServiceMulticontainerExample(t *testing.T) {`, betaDiff: `diff --git a/google-beta/services/alloydb/resource_alloydb_backup_generated_test.go b/google-beta/services/alloydb/resource_alloydb_backup_generated_test.go +func TestAccAlloydbBackup_alloydbBackupFullTestNewExample(t *testing.T) {`, - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{}, FailedTests: []string{}, }, @@ -199,7 +199,7 @@ func TestNotRunTests(t *testing.T) { "always count GA-only added tests": { gaDiff: "+func TestAccOne(t *testing.T) {", betaDiff: "", - result: &vcr.Result{ + result: vcr.Result{ PassedTests: []string{"TestAccOne"}, FailedTests: []string{"TestAccTwo"}, }, @@ -226,7 +226,7 @@ func TestAnalyticsComment(t *testing.T) { { name: "run full vcr is false and no affected services", data: analytics{ - ReplayingResult: &vcr.Result{ + ReplayingResult: vcr.Result{ PassedTests: []string{"a", "b", "c"}, SkippedTests: []string{"d", "e"}, FailedTests: []string{"f"}, @@ -257,7 +257,7 @@ func TestAnalyticsComment(t *testing.T) { { name: "run full vcr is false and has affected services", data: analytics{ - ReplayingResult: &vcr.Result{ + 
ReplayingResult: vcr.Result{ PassedTests: []string{"a", "b", "c"}, SkippedTests: []string{"d", "e"}, FailedTests: []string{"f"}, @@ -292,7 +292,7 @@ func TestAnalyticsComment(t *testing.T) { { name: "run full vcr is true", data: analytics{ - ReplayingResult: &vcr.Result{ + ReplayingResult: vcr.Result{ PassedTests: []string{"a", "b", "c"}, SkippedTests: []string{"d", "e"}, FailedTests: []string{"f"}, @@ -427,7 +427,7 @@ func TestWithReplayFailedTests(t *testing.T) { { name: "with failed tests", data: withReplayFailedTests{ - ReplayingResult: &vcr.Result{ + ReplayingResult: vcr.Result{ FailedTests: []string{"a", "b"}, }, }, @@ -525,11 +525,11 @@ func TestRecordReplay(t *testing.T) { { name: "ReplayingAfterRecordingResult has failed tests", data: recordReplay{ - RecordingResult: &vcr.Result{ + RecordingResult: vcr.Result{ PassedTests: []string{"a", "b", "c"}, FailedTests: []string{"d", "e"}, }, - ReplayingAfterRecordingResult: &vcr.Result{ + ReplayingAfterRecordingResult: vcr.Result{ PassedTests: []string{"a"}, FailedTests: []string{"b", "c"}, }, @@ -572,10 +572,10 @@ func TestRecordReplay(t *testing.T) { { name: "ReplayingAfterRecordingResult does not have failed tests", data: recordReplay{ - RecordingResult: &vcr.Result{ + RecordingResult: vcr.Result{ PassedTests: []string{"a", "b", "c"}, }, - ReplayingAfterRecordingResult: &vcr.Result{ + ReplayingAfterRecordingResult: vcr.Result{ PassedTests: []string{"a", "b", "c"}, }, AllRecordingPassed: true, diff --git a/.ci/magician/cmd/vcr_cassette_update.go b/.ci/magician/cmd/vcr_cassette_update.go index 4e59b1cfed81..79e25c9f4ca9 100644 --- a/.ci/magician/cmd/vcr_cassette_update.go +++ b/.ci/magician/cmd/vcr_cassette_update.go @@ -46,13 +46,13 @@ var ( ) type vcrCassetteUpdateReplayingResult struct { - ReplayingResult *vcr.Result + ReplayingResult vcr.Result ReplayingErr error AllReplayingPassed bool } type vcrCassetteUpdateRecordingResult struct { - RecordingResult *vcr.Result + RecordingResult vcr.Result 
HasTerminatedTests bool RecordingErr error AllRecordingPassed bool diff --git a/.ci/magician/cmd/vcr_cassette_update_test.go b/.ci/magician/cmd/vcr_cassette_update_test.go index 435f870c2e7d..bd1f4ce9d58d 100644 --- a/.ci/magician/cmd/vcr_cassette_update_test.go +++ b/.ci/magician/cmd/vcr_cassette_update_test.go @@ -23,7 +23,7 @@ func TestFormatVCRCassettesUpdateReplaying(t *testing.T) { name: "replay error", data: vcrCassetteUpdateReplayingResult{ ReplayingErr: fmt.Errorf("some error"), - ReplayingResult: &vcr.Result{ + ReplayingResult: vcr.Result{ PassedTests: []string{"a", "b"}, FailedTests: []string{"c", "d"}, SkippedTests: []string{"e"}, @@ -55,7 +55,7 @@ func TestFormatVCRCassettesUpdateReplaying(t *testing.T) { { name: "replay success", data: vcrCassetteUpdateReplayingResult{ - ReplayingResult: &vcr.Result{ + ReplayingResult: vcr.Result{ PassedTests: []string{"a", "b"}, SkippedTests: []string{"e"}, }, @@ -82,7 +82,7 @@ func TestFormatVCRCassettesUpdateReplaying(t *testing.T) { { name: "replay failure without error", data: vcrCassetteUpdateReplayingResult{ - ReplayingResult: &vcr.Result{ + ReplayingResult: vcr.Result{ PassedTests: []string{"a", "b"}, FailedTests: []string{"c", "d"}, SkippedTests: []string{"e"}, @@ -111,7 +111,7 @@ func TestFormatVCRCassettesUpdateReplaying(t *testing.T) { { name: "replay panic", data: vcrCassetteUpdateReplayingResult{ - ReplayingResult: &vcr.Result{ + ReplayingResult: vcr.Result{ PassedTests: []string{"a", "b"}, FailedTests: []string{"c", "d"}, SkippedTests: []string{"e"}, @@ -150,7 +150,7 @@ func TestFormatVCRCassettesUpdateRecording(t *testing.T) { name: "record error", data: vcrCassetteUpdateRecordingResult{ RecordingErr: fmt.Errorf("some error"), - RecordingResult: &vcr.Result{ + RecordingResult: vcr.Result{ PassedTests: []string{"a", "b"}, FailedTests: []string{"c", "d"}, }, @@ -181,7 +181,7 @@ func TestFormatVCRCassettesUpdateRecording(t *testing.T) { { name: "record success", data: vcrCassetteUpdateRecordingResult{ - 
RecordingResult: &vcr.Result{ + RecordingResult: vcr.Result{ PassedTests: []string{"a", "b"}, }, AllRecordingPassed: true, @@ -209,7 +209,7 @@ func TestFormatVCRCassettesUpdateRecording(t *testing.T) { { name: "record failed without error", data: vcrCassetteUpdateRecordingResult{ - RecordingResult: &vcr.Result{ + RecordingResult: vcr.Result{ PassedTests: []string{"a", "b"}, FailedTests: []string{"c", "d"}, }, @@ -235,7 +235,7 @@ func TestFormatVCRCassettesUpdateRecording(t *testing.T) { { name: "record panic", data: vcrCassetteUpdateRecordingResult{ - RecordingResult: &vcr.Result{ + RecordingResult: vcr.Result{ PassedTests: []string{"a", "b"}, FailedTests: []string{"c", "d"}, Panics: []string{"e"}, @@ -254,7 +254,7 @@ func TestFormatVCRCassettesUpdateRecording(t *testing.T) { { name: "has terminated test", data: vcrCassetteUpdateRecordingResult{ - RecordingResult: &vcr.Result{ + RecordingResult: vcr.Result{ PassedTests: []string{"a", "b"}, }, HasTerminatedTests: true, diff --git a/.ci/magician/vcr/tester.go b/.ci/magician/vcr/tester.go index cb3a4c593130..964f9c3fd006 100644 --- a/.ci/magician/vcr/tester.go +++ b/.ci/magician/vcr/tester.go @@ -142,24 +142,24 @@ func (vt *Tester) LogPath(mode Mode, version provider.Version) string { // Run the vcr tests in the given mode and provider version and return the result. // This will overwrite any existing logs for the given mode and version. 
-func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (*Result, error) { +func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (Result, error) { logPath, err := vt.getLogPath(mode, version) if err != nil { - return nil, err + return Result{}, err } repoPath, ok := vt.repoPaths[version] if !ok { - return nil, fmt.Errorf("no repo cloned for version %s in %v", version, vt.repoPaths) + return Result{}, fmt.Errorf("no repo cloned for version %s in %v", version, vt.repoPaths) } if err := vt.rnr.PushDir(repoPath); err != nil { - return nil, err + return Result{}, err } if len(testDirs) == 0 { var err error testDirs, err = vt.googleTestDirectory() if err != nil { - return nil, err + return Result{}, err } } @@ -168,14 +168,14 @@ func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (* case Replaying: cassettePath, ok = vt.cassettePaths[version] if !ok { - return nil, fmt.Errorf("cassettes not fetched for version %s", version) + return Result{}, fmt.Errorf("cassettes not fetched for version %s", version) } case Recording: if err := vt.rnr.RemoveAll(cassettePath); err != nil { - return nil, fmt.Errorf("error removing cassettes: %v", err) + return Result{}, fmt.Errorf("error removing cassettes: %v", err) } if err := vt.rnr.Mkdir(cassettePath); err != nil { - return nil, fmt.Errorf("error creating cassette dir: %v", err) + return Result{}, fmt.Errorf("error creating cassette dir: %v", err) } vt.cassettePaths[version] = cassettePath } @@ -228,7 +228,7 @@ func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (* } // Leave repo directory. 
if err := vt.rnr.PopDir(); err != nil { - return nil, err + return Result{}, err } logFileName := filepath.Join(vt.baseDir, "testlogs", fmt.Sprintf("%s_test.log", mode.Lower())) @@ -240,31 +240,31 @@ func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (* } allOutput += output if err := vt.rnr.WriteFile(logFileName, allOutput); err != nil { - return nil, fmt.Errorf("error writing log: %v, test output: %v", err, allOutput) + return Result{}, fmt.Errorf("error writing log: %v, test output: %v", err, allOutput) } return collectResult(output), testErr } -func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tests []string) (*Result, error) { +func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tests []string) (Result, error) { logPath, err := vt.getLogPath(mode, version) if err != nil { - return nil, err + return Result{}, err } if err := vt.rnr.Mkdir(filepath.Join(vt.baseDir, "testlogs", mode.Lower()+"_build")); err != nil { - return nil, err + return Result{}, err } repoPath, ok := vt.repoPaths[version] if !ok { - return nil, fmt.Errorf("no repo cloned for version %s in %v", version, vt.repoPaths) + return Result{}, fmt.Errorf("no repo cloned for version %s in %v", version, vt.repoPaths) } if err := vt.rnr.PushDir(repoPath); err != nil { - return nil, err + return Result{}, err } if len(testDirs) == 0 { var err error testDirs, err = vt.googleTestDirectory() if err != nil { - return nil, err + return Result{}, err } } @@ -273,14 +273,14 @@ func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tes case Replaying: cassettePath, ok = vt.cassettePaths[version] if !ok { - return nil, fmt.Errorf("cassettes not fetched for version %s", version) + return Result{}, fmt.Errorf("cassettes not fetched for version %s", version) } case Recording: if err := vt.rnr.RemoveAll(cassettePath); err != nil { - return nil, fmt.Errorf("error removing cassettes: %v", err) + return Result{}, 
fmt.Errorf("error removing cassettes: %v", err) } if err := vt.rnr.Mkdir(cassettePath); err != nil { - return nil, fmt.Errorf("error creating cassette dir: %v", err) + return Result{}, fmt.Errorf("error creating cassette dir: %v", err) } vt.cassettePaths[version] = cassettePath } @@ -304,7 +304,7 @@ func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tes // Leave repo directory. if err := vt.rnr.PopDir(); err != nil { - return nil, err + return Result{}, err } var output string for otpt := range outputs { @@ -312,7 +312,7 @@ func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tes } logFileName := filepath.Join(vt.baseDir, "testlogs", fmt.Sprintf("%s_test.log", mode.Lower())) if err := vt.rnr.WriteFile(logFileName, output); err != nil { - return nil, err + return Result{}, err } var testErr error for err := range errs { @@ -483,7 +483,7 @@ func (vt *Tester) printLogs(logPath string) { }) } -func collectResult(output string) *Result { +func collectResult(output string) Result { matches := testResultsExpression.FindAllStringSubmatch(output, -1) resultSets := make(map[string]map[string]struct{}, 4) for _, submatches := range matches { @@ -505,7 +505,7 @@ func collectResult(output string) *Result { } sort.Strings(results[kind]) } - return &Result{ + return Result{ FailedTests: results["FAIL"], PassedTests: results["PASS"], SkippedTests: results["SKIP"], From f52c3af4e9f8ffcae4fe7f8b674b244a078818a3 Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Wed, 4 Sep 2024 16:01:19 -0400 Subject: [PATCH 30/60] Add memorystore api (#11631) --- .ci/infra/terraform/main.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/infra/terraform/main.tf b/.ci/infra/terraform/main.tf index fdb831f0733e..72daa43e2e00 100644 --- a/.ci/infra/terraform/main.tf +++ b/.ci/infra/terraform/main.tf @@ -279,6 +279,7 @@ module "project-services" { "looker.googleapis.com", "managedidentities.googleapis.com", "memcache.googleapis.com", + 
"memorystore.googleapis.com", "metastore.googleapis.com", "migrationcenter.googleapis.com", "ml.googleapis.com", From d323523aa265113daa2b63042b244aa79e3e0f1f Mon Sep 17 00:00:00 2001 From: thokalavinod Date: Wed, 4 Sep 2024 20:10:22 +0000 Subject: [PATCH 31/60] Added Filter validation for google_scc_notification_config (#11535) --- .../securitycenter/NotificationConfig.yaml | 3 ++ .../ProjectNotificationConfig.yaml | 3 ++ .../OrganizationNotificationConfig.yaml | 3 ++ .../ProjectNotificationConfig.yaml | 3 ++ .../resource_scc_notification_config_test.go | 28 +++++++++++++++++ ...ce_scc_project_notification_config_test.go | 28 +++++++++++++++++ ...2_organization_notification_config_test.go | 31 +++++++++++++++++++ ...scc_v2_project_notification_config_test.go | 29 +++++++++++++++++ 8 files changed, 128 insertions(+) diff --git a/mmv1/products/securitycenter/NotificationConfig.yaml b/mmv1/products/securitycenter/NotificationConfig.yaml index 647253910f57..fcc566f4c9ca 100644 --- a/mmv1/products/securitycenter/NotificationConfig.yaml +++ b/mmv1/products/securitycenter/NotificationConfig.yaml @@ -86,6 +86,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: streamingConfig required: true + allow_empty_object: true + send_empty_value: true description: | The config for triggering streaming-based notifications. update_mask_fields: @@ -94,6 +96,7 @@ properties: - !ruby/object:Api::Type::String name: filter required: true + send_empty_value: true description: | Expression that defines the filter to apply across create/update events of assets or findings as specified by the event type. 
The diff --git a/mmv1/products/securitycenter/ProjectNotificationConfig.yaml b/mmv1/products/securitycenter/ProjectNotificationConfig.yaml index d76a7db3b229..b73403eb14d7 100644 --- a/mmv1/products/securitycenter/ProjectNotificationConfig.yaml +++ b/mmv1/products/securitycenter/ProjectNotificationConfig.yaml @@ -80,6 +80,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: streamingConfig required: true + allow_empty_object: true + send_empty_value: true description: | The config for triggering streaming-based notifications. update_mask_fields: @@ -88,6 +90,7 @@ properties: - !ruby/object:Api::Type::String name: filter required: true + send_empty_value: true description: | Expression that defines the filter to apply across create/update events of assets or findings as specified by the event type. The diff --git a/mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml b/mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml index 1026cf8d27be..5d9e9c1ff065 100644 --- a/mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml +++ b/mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml @@ -93,6 +93,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: streamingConfig required: true + allow_empty_object: true + send_empty_value: true description: | The config for triggering streaming-based notifications. update_mask_fields: @@ -101,6 +103,7 @@ properties: - !ruby/object:Api::Type::String name: filter required: true + send_empty_value: true description: | Expression that defines the filter to apply across create/update events of assets or findings as specified by the event type. 
The diff --git a/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml b/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml index fad60f931ef0..8327aeb049cf 100644 --- a/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml +++ b/mmv1/products/securitycenterv2/ProjectNotificationConfig.yaml @@ -90,6 +90,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: streamingConfig required: true + allow_empty_object: true + send_empty_value: true description: | The config for triggering streaming-based notifications. update_mask_fields: @@ -98,6 +100,7 @@ properties: - !ruby/object:Api::Type::String name: filter required: true + send_empty_value: true description: | Expression that defines the filter to apply across create/update events of assets or findings as specified by the event type. The diff --git a/mmv1/third_party/terraform/services/securitycenter/resource_scc_notification_config_test.go b/mmv1/third_party/terraform/services/securitycenter/resource_scc_notification_config_test.go index e172230d8ad0..551fbe0d0d4d 100644 --- a/mmv1/third_party/terraform/services/securitycenter/resource_scc_notification_config_test.go +++ b/mmv1/third_party/terraform/services/securitycenter/resource_scc_notification_config_test.go @@ -39,6 +39,15 @@ func TestAccSecurityCenterNotificationConfig_updateStreamingConfigFilter(t *test ImportStateVerify: true, ImportStateVerifyIgnore: []string{"organization", "config_id"}, }, + { + Config: testAccSecurityCenterNotificationConfig_emptyStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization", "config_id"}, + }, }, }) } @@ -61,3 +70,22 @@ resource "google_scc_notification_config" "custom_notification_config" { } `, context) } + +func testAccSecurityCenterNotificationConfig_emptyStreamingConfigFilter(context map[string]interface{}) string { + return 
acctest.Nprintf(` +resource "google_pubsub_topic" "scc_notification" { + name = "tf-test-my-topic%{random_suffix}" +} + +resource "google_scc_notification_config" "custom_notification_config" { + config_id = "tf-test-my-config%{random_suffix}" + organization = "%{org_id}" + description = "My custom Cloud Security Command Center Finding Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_notification.id + + streaming_config { + filter = "" + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go b/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go index 77e5328d7475..ca9d8d2be850 100644 --- a/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go +++ b/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go @@ -39,6 +39,15 @@ func TestAccSecurityCenterProjectNotificationConfig_updateStreamingConfigFilter( ImportStateVerify: true, ImportStateVerifyIgnore: []string{"project", "config_id"}, }, + { + Config: testAccSecurityCenterProjectNotificationConfig_emptyStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "config_id"}, + }, }, }) } @@ -61,3 +70,22 @@ resource "google_scc_project_notification_config" "custom_notification_config" { } `, context) } + +func testAccSecurityCenterProjectNotificationConfig_emptyStreamingConfigFilter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_pubsub_topic" "scc_project_notification" { + name = "tf-test-my-topic%{random_suffix}" +} + +resource "google_scc_project_notification_config" "custom_notification_config" { + config_id = "tf-test-my-config%{random_suffix}" + project = "%{project}" + description = 
"My custom Cloud Security Command Center Finding Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_project_notification.id + + streaming_config { + filter = "" + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go index eebf15dc9ef3..4c62997b7af6 100644 --- a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go +++ b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go @@ -42,6 +42,17 @@ func TestAccSecurityCenterV2OrganizationNotificationConfig_basic(t *testing.T) { "config_id", }, }, + { + Config: testAccSecurityCenterV2OrganizationNotificationConfig_empty(context), + }, + { + ResourceName: "google_scc_v2_organization_notification_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "config_id", + }, + }, }, }) } @@ -85,3 +96,23 @@ resource "google_scc_v2_organization_notification_config" "default" { } `, context) } + +func testAccSecurityCenterV2OrganizationNotificationConfig_empty(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_pubsub_topic" "scc_v2_organization_notification_config" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_scc_v2_organization_notification_config" "default" { + config_id = "tf-test-config-%{random_suffix}" + organization = "%{org_id}" + location = "global" + description = "An updated test organization notification config" + pubsub_topic = google_pubsub_topic.scc_v2_organization_notification_config.id + + streaming_config { + filter = "" + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go 
b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go index d46d3b8a85ec..0d9891f3edc3 100644 --- a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go +++ b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_notification_config_test.go @@ -42,6 +42,15 @@ func TestAccSecurityCenterV2ProjectNotificationConfig_updateStreamingConfigFilte ImportStateVerify: true, ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, }, + { + Config: testAccSecurityCenterV2ProjectNotificationConfig_emptyStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_v2_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "location", "config_id"}, + }, }, }) } @@ -65,3 +74,23 @@ resource "google_scc_v2_project_notification_config" "custom_notification_config } `, context) } + +func testAccSecurityCenterV2ProjectNotificationConfig_emptyStreamingConfigFilter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_pubsub_topic" "scc_v2_project_notification" { + name = "tf-test-my-topic%{random_suffix}" +} + +resource "google_scc_v2_project_notification_config" "custom_notification_config" { + config_id = "tf-test-my-config%{random_suffix}" + project = "%{project}" + description = "My custom Cloud Security Command Center Finding Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_v2_project_notification.id + location = "global" + + streaming_config { + filter = "" + } +} +`, context) +} From 5b033ec44860b3116356ed084e9ee6333cf3d25f Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Wed, 4 Sep 2024 17:07:34 -0400 Subject: [PATCH 32/60] Fix deep_merge function to correctly handle nil arrays (#11636) --- mmv1/api/object.go | 9 ++++++++- mmv1/api/object.rb | 7 ++++--- 2 files changed, 12 insertions(+), 4 
deletions(-) diff --git a/mmv1/api/object.go b/mmv1/api/object.go index 1149fa42da79..7c1c8f8a821b 100644 --- a/mmv1/api/object.go +++ b/mmv1/api/object.go @@ -34,11 +34,18 @@ type NamedObject struct { // } // func (n *Named) deep_merge(arr1, arr2) { +// if arr1.nil? +// return arr2 +// end +// if arr2.nil? +// return arr1 +// end + // // Scopes is an array of standard strings. In which case return the // // version in the overrides. This allows scopes to be removed rather // // than allowing for a merge of the two arrays // if string_array?(arr1) -// return arr2.nil? ? arr1 : arr2 +// return arr2 // end // // Merge any elements that exist in both diff --git a/mmv1/api/object.rb b/mmv1/api/object.rb index f3f4012a32af..b11dbaeda54b 100644 --- a/mmv1/api/object.rb +++ b/mmv1/api/object.rb @@ -30,12 +30,13 @@ def string_array?(arr) end def deep_merge(arr1, arr2) + return arr2 if arr1.nil? + return arr1 if arr2.nil? + # Scopes is an array of standard strings. In which case return the # version in the overrides. This allows scopes to be removed rather # than allowing for a merge of the two arrays - if string_array?(arr1) - return arr2.nil? ? 
arr1 : arr2 - end + return arr2 if string_array?(arr1) # Merge any elements that exist in both result = arr1.map do |el1| From 6dcdc468a4ec4b18ef4eec29acad9990430092de Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Wed, 4 Sep 2024 14:18:09 -0700 Subject: [PATCH 33/60] Make vcr tester Run and RunParallel use options struct (#11628) Co-authored-by: Stephen Lewis (Burrows) --- .ci/magician/cmd/check_cassettes.go | 5 +- .ci/magician/cmd/generate_comment.go | 2 +- .ci/magician/cmd/request_service_reviewers.go | 2 +- .ci/magician/cmd/scheduled_pr_reminders.go | 4 +- .ci/magician/cmd/test_terraform_vcr.go | 25 +++++- .ci/magician/cmd/vcr_cassette_update.go | 11 ++- .ci/magician/vcr/tester.go | 78 ++++++++++--------- 7 files changed, 81 insertions(+), 46 deletions(-) diff --git a/.ci/magician/cmd/check_cassettes.go b/.ci/magician/cmd/check_cassettes.go index 84bfacd628c9..bd0e8f1b9693 100644 --- a/.ci/magician/cmd/check_cassettes.go +++ b/.ci/magician/cmd/check_cassettes.go @@ -103,7 +103,10 @@ func execCheckCassettes(commit string, vt *vcr.Tester, ctlr *source.Controller) } vt.SetRepoPath(provider.Beta, providerRepo.Path) - result, err := vt.Run(vcr.Replaying, provider.Beta, nil) + result, err := vt.Run(vcr.RunOptions{ + Mode: vcr.Replaying, + Version: provider.Beta, + }) if err != nil { fmt.Println("Error running VCR: ", err) } diff --git a/.ci/magician/cmd/generate_comment.go b/.ci/magician/cmd/generate_comment.go index 0efc3fc65ba8..f1a963d8f3f9 100644 --- a/.ci/magician/cmd/generate_comment.go +++ b/.ci/magician/cmd/generate_comment.go @@ -435,7 +435,7 @@ func buildDiffProcessor(diffProcessorPath, providerLocalPath string, env map[str } } if _, err := rnr.Run("make", []string{"build"}, env); err != nil { - return fmt.Errorf("Error running make build in %s: %v\n", diffProcessorPath, err) + return fmt.Errorf("error running make build in %s: %v", diffProcessorPath, err) } return rnr.PopDir() } diff --git a/.ci/magician/cmd/request_service_reviewers.go 
b/.ci/magician/cmd/request_service_reviewers.go index 267d182debd5..c9f335408cad 100644 --- a/.ci/magician/cmd/request_service_reviewers.go +++ b/.ci/magician/cmd/request_service_reviewers.go @@ -107,7 +107,7 @@ func execRequestServiceReviewers(prNumber string, gh GithubClient, enrolledTeams } exitCode := 0 - for githubTeam, _ := range githubTeamsSet { + for githubTeam := range githubTeamsSet { members, err := gh.GetTeamMembers("GoogleCloudPlatform", githubTeam) if err != nil { fmt.Printf("Error fetching members for GoogleCloudPlatform/%s: %s", githubTeam, err) diff --git a/.ci/magician/cmd/scheduled_pr_reminders.go b/.ci/magician/cmd/scheduled_pr_reminders.go index a6806c577332..6cfac38f013c 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders.go +++ b/.ci/magician/cmd/scheduled_pr_reminders.go @@ -189,7 +189,7 @@ func execScheduledPrReminders(gh *github.Client) error { }, ) if err != nil { - return fmt.Errorf("Error posting comment to PR %d: %w", *pr.Number, err) + return fmt.Errorf("error posting comment to PR %d: %w", *pr.Number, err) } } } @@ -208,7 +208,7 @@ func execScheduledPrReminders(gh *github.Client) error { }, ) if err != nil { - return fmt.Errorf("Error closing PR %d: %w", *pr.Number, err) + return fmt.Errorf("error closing PR %d: %w", *pr.Number, err) } } } diff --git a/.ci/magician/cmd/test_terraform_vcr.go b/.ci/magician/cmd/test_terraform_vcr.go index 83d8106f26b8..042be4732129 100644 --- a/.ci/magician/cmd/test_terraform_vcr.go +++ b/.ci/magician/cmd/test_terraform_vcr.go @@ -244,7 +244,12 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, return fmt.Errorf("error posting comment: %w", err) } - recordingResult, recordingErr := vt.RunParallel(vcr.Recording, provider.Beta, testDirs, replayingResult.FailedTests) + recordingResult, recordingErr := vt.RunParallel(vcr.RunOptions{ + Mode: vcr.Recording, + Version: provider.Beta, + TestDirs: testDirs, + Tests: replayingResult.FailedTests, + }) if recordingErr != nil { 
testState = "failure" } else { @@ -268,7 +273,12 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, replayingAfterRecordingResult := vcr.Result{} var replayingAfterRecordingErr error if len(recordingResult.PassedTests) > 0 { - replayingAfterRecordingResult, replayingAfterRecordingErr = vt.RunParallel(vcr.Replaying, provider.Beta, testDirs, recordingResult.PassedTests) + replayingAfterRecordingResult, replayingAfterRecordingErr = vt.RunParallel(vcr.RunOptions{ + Mode: vcr.Replaying, + Version: provider.Beta, + TestDirs: testDirs, + Tests: recordingResult.PassedTests, + }) if replayingAfterRecordingErr != nil { testState = "failure" } @@ -394,14 +404,21 @@ func runReplaying(runFullVCR bool, services map[string]struct{}, vt *vcr.Tester) var replayingErr error if runFullVCR { fmt.Println("runReplaying: full VCR tests") - result, replayingErr = vt.Run(vcr.Replaying, provider.Beta, nil) + result, replayingErr = vt.Run(vcr.RunOptions{ + Mode: vcr.Replaying, + Version: provider.Beta, + }) } else if len(services) > 0 { fmt.Printf("runReplaying: %d specific services: %v\n", len(services), services) for service := range services { servicePath := "./" + filepath.Join("google-beta", "services", service) testDirs = append(testDirs, servicePath) fmt.Println("run VCR tests in ", service) - serviceResult, serviceReplayingErr := vt.Run(vcr.Replaying, provider.Beta, []string{servicePath}) + serviceResult, serviceReplayingErr := vt.Run(vcr.RunOptions{ + Mode: vcr.Replaying, + Version: provider.Beta, + TestDirs: []string{servicePath}, + }) if serviceReplayingErr != nil { replayingErr = serviceReplayingErr } diff --git a/.ci/magician/cmd/vcr_cassette_update.go b/.ci/magician/cmd/vcr_cassette_update.go index 79e25c9f4ca9..bcf6642e6586 100644 --- a/.ci/magician/cmd/vcr_cassette_update.go +++ b/.ci/magician/cmd/vcr_cassette_update.go @@ -125,7 +125,10 @@ func execVCRCassetteUpdate(buildID, today string, rnr ExecRunner, ctlr *source.C 
vt.SetRepoPath(provider.Beta, providerRepo.Path) fmt.Println("running tests in REPLAYING mode now") - replayingResult, replayingErr := vt.Run(vcr.Replaying, provider.Beta, nil) + replayingResult, replayingErr := vt.Run(vcr.RunOptions{ + Mode: vcr.Replaying, + Version: provider.Beta, + }) // upload replay build and test logs buildLogPath := filepath.Join(rnr.GetCWD(), "testlogs", fmt.Sprintf("%s_test.log", vcr.Replaying.Lower())) @@ -156,7 +159,11 @@ func execVCRCassetteUpdate(buildID, today string, rnr ExecRunner, ctlr *source.C if len(replayingResult.FailedTests) != 0 { fmt.Println("running tests in RECORDING mode now") - recordingResult, recordingErr := vt.RunParallel(vcr.Recording, provider.Beta, nil, replayingResult.FailedTests) + recordingResult, recordingErr := vt.RunParallel(vcr.RunOptions{ + Mode: vcr.Recording, + Version: provider.Beta, + Tests: replayingResult.FailedTests, + }) // upload build and test logs first to preserve debugging logs in case // uploading cassettes failed because recording not work diff --git a/.ci/magician/vcr/tester.go b/.ci/magician/vcr/tester.go index 964f9c3fd006..63d6f1445079 100644 --- a/.ci/magician/vcr/tester.go +++ b/.ci/magician/vcr/tester.go @@ -140,35 +140,43 @@ func (vt *Tester) LogPath(mode Mode, version provider.Version) string { return vt.logPaths[lgky] } +type RunOptions struct { + Mode Mode + Version provider.Version + TestDirs []string + Tests []string +} + // Run the vcr tests in the given mode and provider version and return the result. // This will overwrite any existing logs for the given mode and version. 
-func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (Result, error) { - logPath, err := vt.getLogPath(mode, version) +func (vt *Tester) Run(opt RunOptions) (Result, error) { + logPath, err := vt.getLogPath(opt.Mode, opt.Version) if err != nil { return Result{}, err } - repoPath, ok := vt.repoPaths[version] + repoPath, ok := vt.repoPaths[opt.Version] if !ok { - return Result{}, fmt.Errorf("no repo cloned for version %s in %v", version, vt.repoPaths) + return Result{}, fmt.Errorf("no repo cloned for version %s in %v", opt.Version, vt.repoPaths) } if err := vt.rnr.PushDir(repoPath); err != nil { return Result{}, err } - if len(testDirs) == 0 { + if len(opt.TestDirs) == 0 { var err error - testDirs, err = vt.googleTestDirectory() + opt.TestDirs, err = vt.googleTestDirectory() if err != nil { return Result{}, err } + } - cassettePath := filepath.Join(vt.baseDir, "cassettes", version.String()) - switch mode { + cassettePath := filepath.Join(vt.baseDir, "cassettes", opt.Version.String()) + switch opt.Mode { case Replaying: - cassettePath, ok = vt.cassettePaths[version] + cassettePath, ok = vt.cassettePaths[opt.Version] if !ok { - return Result{}, fmt.Errorf("cassettes not fetched for version %s", version) + return Result{}, fmt.Errorf("cassettes not fetched for version %s", opt.Version) } case Recording: if err := vt.rnr.RemoveAll(cassettePath); err != nil { @@ -177,11 +185,11 @@ func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (R if err := vt.rnr.Mkdir(cassettePath); err != nil { return Result{}, fmt.Errorf("error creating cassette dir: %v", err) } - vt.cassettePaths[version] = cassettePath + vt.cassettePaths[opt.Version] = cassettePath } args := []string{"test"} - args = append(args, testDirs...) + args = append(args, opt.TestDirs...) 
args = append(args, "-parallel", strconv.Itoa(accTestParallelism), @@ -194,11 +202,11 @@ func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (R ) env := map[string]string{ "VCR_PATH": cassettePath, - "VCR_MODE": mode.Upper(), + "VCR_MODE": opt.Mode.Upper(), "ACCTEST_PARALLELISM": strconv.Itoa(accTestParallelism), "GOOGLE_CREDENTIALS": vt.env["SA_KEY"], "GOOGLE_APPLICATION_CREDENTIALS": filepath.Join(vt.baseDir, vt.saKeyPath), - "GOOGLE_TEST_DIRECTORY": strings.Join(testDirs, " "), + "GOOGLE_TEST_DIRECTORY": strings.Join(opt.TestDirs, " "), "TF_LOG": "DEBUG", "TF_LOG_SDK_FRAMEWORK": "INFO", "TF_LOG_PATH_MASK": filepath.Join(logPath, "%s.log"), @@ -224,14 +232,14 @@ func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (R output, testErr := vt.rnr.Run("go", args, env) if testErr != nil { // Use error as output for log. - output = fmt.Sprintf("Error %s tests:\n%v", mode.Lower(), testErr) + output = fmt.Sprintf("Error %s tests:\n%v", opt.Mode.Lower(), testErr) } // Leave repo directory. if err := vt.rnr.PopDir(); err != nil { return Result{}, err } - logFileName := filepath.Join(vt.baseDir, "testlogs", fmt.Sprintf("%s_test.log", mode.Lower())) + logFileName := filepath.Join(vt.baseDir, "testlogs", fmt.Sprintf("%s_test.log", opt.Mode.Lower())) // Write output (or error) to test log. // Append to existing log file. 
allOutput, _ := vt.rnr.ReadFile(logFileName) @@ -245,35 +253,35 @@ func (vt *Tester) Run(mode Mode, version provider.Version, testDirs []string) (R return collectResult(output), testErr } -func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tests []string) (Result, error) { - logPath, err := vt.getLogPath(mode, version) +func (vt *Tester) RunParallel(opt RunOptions) (Result, error) { + logPath, err := vt.getLogPath(opt.Mode, opt.Version) if err != nil { return Result{}, err } - if err := vt.rnr.Mkdir(filepath.Join(vt.baseDir, "testlogs", mode.Lower()+"_build")); err != nil { + if err := vt.rnr.Mkdir(filepath.Join(vt.baseDir, "testlogs", opt.Mode.Lower()+"_build")); err != nil { return Result{}, err } - repoPath, ok := vt.repoPaths[version] + repoPath, ok := vt.repoPaths[opt.Version] if !ok { - return Result{}, fmt.Errorf("no repo cloned for version %s in %v", version, vt.repoPaths) + return Result{}, fmt.Errorf("no repo cloned for version %s in %v", opt.Version, vt.repoPaths) } if err := vt.rnr.PushDir(repoPath); err != nil { return Result{}, err } - if len(testDirs) == 0 { + if len(opt.TestDirs) == 0 { var err error - testDirs, err = vt.googleTestDirectory() + opt.TestDirs, err = vt.googleTestDirectory() if err != nil { return Result{}, err } } - cassettePath := filepath.Join(vt.baseDir, "cassettes", version.String()) - switch mode { + cassettePath := filepath.Join(vt.baseDir, "cassettes", opt.Version.String()) + switch opt.Mode { case Replaying: - cassettePath, ok = vt.cassettePaths[version] + cassettePath, ok = vt.cassettePaths[opt.Version] if !ok { - return Result{}, fmt.Errorf("cassettes not fetched for version %s", version) + return Result{}, fmt.Errorf("cassettes not fetched for version %s", opt.Version) } case Recording: if err := vt.rnr.RemoveAll(cassettePath); err != nil { @@ -282,18 +290,18 @@ func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tes if err := vt.rnr.Mkdir(cassettePath); err != nil { return 
Result{}, fmt.Errorf("error creating cassette dir: %v", err) } - vt.cassettePaths[version] = cassettePath + vt.cassettePaths[opt.Version] = cassettePath } running := make(chan struct{}, parallelJobs) - outputs := make(chan string, len(testDirs)*len(tests)) + outputs := make(chan string, len(opt.TestDirs)*len(opt.Tests)) wg := &sync.WaitGroup{} - wg.Add(len(testDirs) * len(tests)) - errs := make(chan error, len(testDirs)*len(tests)*2) - for _, testDir := range testDirs { - for _, test := range tests { + wg.Add(len(opt.TestDirs) * len(opt.Tests)) + errs := make(chan error, len(opt.TestDirs)*len(opt.Tests)*2) + for _, testDir := range opt.TestDirs { + for _, test := range opt.Tests { running <- struct{}{} - go vt.runInParallel(mode, version, testDir, test, logPath, cassettePath, running, wg, outputs, errs) + go vt.runInParallel(opt.Mode, opt.Version, testDir, test, logPath, cassettePath, running, wg, outputs, errs) } } @@ -310,7 +318,7 @@ func (vt *Tester) RunParallel(mode Mode, version provider.Version, testDirs, tes for otpt := range outputs { output += otpt } - logFileName := filepath.Join(vt.baseDir, "testlogs", fmt.Sprintf("%s_test.log", mode.Lower())) + logFileName := filepath.Join(vt.baseDir, "testlogs", fmt.Sprintf("%s_test.log", opt.Mode.Lower())) if err := vt.rnr.WriteFile(logFileName, output); err != nil { return Result{}, err } From 814e864860bc9a61bfa5784d469f131f7ac280fd Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 4 Sep 2024 17:21:00 -0500 Subject: [PATCH 34/60] Skip TestAccComputeInstanceTemplate_withNamePrefix in VCR (#11637) --- .../compute/data_source_google_compute_instance.go.erb | 2 +- .../compute/resource_compute_instance_template_test.go.erb | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb index ca1d87890eb1..7b0734cf6455 100644 --- 
a/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/data_source_google_compute_instance.go.erb @@ -59,7 +59,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err != nil { return err } - if err := d.Set("network_inferface", networkInterfaces); err != nil { + if err := d.Set("network_interface", networkInterfaces); err != nil { return err } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb index f84d7330e8c3..81855c1b27ab 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb @@ -876,6 +876,9 @@ func TestAccComputeInstanceTemplate_invalidDiskType(t *testing.T) { } func TestAccComputeInstanceTemplate_withNamePrefix(t *testing.T) { + // Randomness from generated name suffix + acctest.SkipIfVcr(t) + t.Parallel() // 8 + 46 = 54 which is the valid max From 17c0a845960a486e973cd31dc87587ceee838df1 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Wed, 4 Sep 2024 15:31:47 -0700 Subject: [PATCH 35/60] Removed always-failing build cancellation (#11635) --- .ci/gcb-pr-downstream-generation-and-test.yml | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/.ci/gcb-pr-downstream-generation-and-test.yml b/.ci/gcb-pr-downstream-generation-and-test.yml index b97ae0daf867..9699417a831f 100644 --- a/.ci/gcb-pr-downstream-generation-and-test.yml +++ b/.ci/gcb-pr-downstream-generation-and-test.yml @@ -1,21 +1,5 @@ --- steps: - - name: 'gcr.io/cloud-builders/gcloud' - id: "Stop Other Ongoing Build" - entrypoint: 'bash' - args: - - -c - - | - on_going_build=($(gcloud builds list --ongoing 
--format='value[separator=","](id,substitutions.REVISION_ID)' --filter="substitutions.TRIGGER_NAME:$TRIGGER_NAME substitutions._PR_NUMBER:$_PR_NUMBER" | xargs)) - for (( i=0; i<${#on_going_build[@]}; i++ )); do - IFS="," read -r -a fields <<< "${on_going_build[i]}" - if [ "$i" -gt "0" ] && [ "${fields[1]}" != $COMMIT_SHA ]; then # skip current - echo "Cancelling build ${fields[0]}" - - gcloud builds cancel ${fields[0]} - fi - done - # The GCB / GH integration uses a shallow clone of the repo. We need to convert # that to a full clone in order to work with it properly. # https://cloud.google.com/source-repositories/docs/integrating-with-cloud-build#unshallowing_clones @@ -235,7 +219,7 @@ steps: - $COMMIT_SHA - $BUILD_ID - $PROJECT_ID - - "18" # Build step + - "19" # Build step - terraform-google-conversion - name: 'gcr.io/graphite-docker-images/go-plus' From b2c4823efa057bcdaaccac1b9c5d235d7e2f7c98 Mon Sep 17 00:00:00 2001 From: Jieqing Chen <44368060+EZIOJQ@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:41:21 -0700 Subject: [PATCH 36/60] Support X509 Federation (#11493) Co-authored-by: Jieqing(Jay) Chen --- .../iambeta/WorkloadIdentityPoolProvider.yaml | 63 +++++++++++++ ...d_identity_pool_provider_x509_basic.tf.erb | 18 ++++ ...ad_identity_pool_provider_x509_full.tf.erb | 24 +++++ ...orkload_identity_pool_provider_test.go.erb | 92 +++++++++++++++++++ .../iambeta/test-fixtures/intermediate_ca.pem | 3 + .../iambeta/test-fixtures/trust_anchor.pem | 3 + .../test-fixtures/trust_anchor_updated.pem | 3 + 7 files changed, 206 insertions(+) create mode 100644 mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_full.tf.erb create mode 100644 mmv1/third_party/terraform/services/iambeta/test-fixtures/intermediate_ca.pem create mode 100644 mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor.pem create mode 100644 
mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor_updated.pem diff --git a/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml b/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml index 808f8ab45efc..cbf9e401da00 100644 --- a/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml +++ b/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml @@ -71,6 +71,18 @@ examples: vars: workload_identity_pool_id: 'example-pool' workload_identity_pool_provider_id: 'example-prvdr' + - !ruby/object:Provider::Terraform::Examples + name: 'iam_workload_identity_pool_provider_x509_basic' + primary_resource_id: 'example' + vars: + workload_identity_pool_id: 'example-pool' + workload_identity_pool_provider_id: 'example-prvdr' + - !ruby/object:Provider::Terraform::Examples + name: 'iam_workload_identity_pool_provider_x509_full' + primary_resource_id: 'example' + vars: + workload_identity_pool_id: 'example-pool' + workload_identity_pool_provider_id: 'example-prvdr' custom_code: !ruby/object:Provider::Terraform::CustomCode constants: templates/terraform/constants/iam_workload_identity_pool_provider.go.erb decoder: templates/terraform/decoders/treat_deleted_state_as_gone.go.erb @@ -222,6 +234,7 @@ properties: - aws - oidc - saml + - x509 properties: - !ruby/object:Api::Type::String name: accountId @@ -236,6 +249,7 @@ properties: - aws - oidc - saml + - x509 update_mask_fields: - 'oidc.allowed_audiences' - 'oidc.issuer_uri' @@ -297,8 +311,57 @@ properties: - aws - oidc - saml + - x509 properties: - !ruby/object:Api::Type::String name: idpMetadataXml description: SAML Identity provider configuration metadata xml doc. required: true + - !ruby/object:Api::Type::NestedObject + name: x509 + description: | + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. 
+ exactly_one_of: + - aws + - oidc + - saml + - x509 + properties: + - !ruby/object:Api::Type::NestedObject + name: trustStore + description: | + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + required: true + properties: + - !ruby/object:Api::Type::Array + name: trustAnchors + description: | + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + required: true + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: pemCertificate + description: | + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + - !ruby/object:Api::Type::Array + name: intermediateCas + description: | + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: pemCertificate + description: | + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). 
diff --git a/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_basic.tf.erb b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_basic.tf.erb new file mode 100644 index 000000000000..b049e6792304 --- /dev/null +++ b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_basic.tf.erb @@ -0,0 +1,18 @@ +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "<%= ctx[:vars]["workload_identity_pool_id"] %>" +} + +resource "google_iam_workload_identity_pool_provider" "<%= ctx[:primary_resource_id] %>" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "<%= ctx[:vars]["workload_identity_pool_provider_id"] %>" + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" + } + x509 { + trust_store { + trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor.pem") + } + } + } +} diff --git a/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_full.tf.erb b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_full.tf.erb new file mode 100644 index 000000000000..6902c02b31ff --- /dev/null +++ b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_x509_full.tf.erb @@ -0,0 +1,24 @@ +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "<%= ctx[:vars]["workload_identity_pool_id"] %>" +} + +resource "google_iam_workload_identity_pool_provider" "<%= ctx[:primary_resource_id] %>" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "<%= ctx[:vars]["workload_identity_pool_provider_id"] %>" + display_name = "Name of provider" + description = "X.509 identity pool provider for automated test" + disabled = true + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" + } + x509 { + trust_store { + 
trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/intermediate_ca.pem") + } + } + } +} diff --git a/mmv1/third_party/terraform/services/iambeta/resource_iam_workload_identity_pool_provider_test.go.erb b/mmv1/third_party/terraform/services/iambeta/resource_iam_workload_identity_pool_provider_test.go.erb index 9ec1c89adae1..ce55167c51b5 100644 --- a/mmv1/third_party/terraform/services/iambeta/resource_iam_workload_identity_pool_provider_test.go.erb +++ b/mmv1/third_party/terraform/services/iambeta/resource_iam_workload_identity_pool_provider_test.go.erb @@ -240,4 +240,96 @@ resource "google_iam_workload_identity_pool_provider" "my_provider" { `, context) } +func TestAccIAMBetaWorkloadIdentityPoolProvider_x509(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_x509_full(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"workload_identity_pool_id", "workload_identity_pool_provider_id"}, + }, + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_x509_update(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"workload_identity_pool_id", "workload_identity_pool_provider_id"}, + }, + }, + }) +} + +func testAccIAMBetaWorkloadIdentityPoolProvider_x509_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource 
"google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" +} + +resource "google_iam_workload_identity_pool_provider" "example" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" + display_name = "Name of provider" + description = "X.509 identity pool provider for automated test" + disabled = true + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" + } + x509 { + trust_store { + trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/intermediate_ca.pem") + } + } + } +} +`, context) +} + +func testAccIAMBetaWorkloadIdentityPoolProvider_x509_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" +} + +resource "google_iam_workload_identity_pool_provider" "example" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" + display_name = "Name of provider" + description = "X.509 identity pool provider for automated test" + disabled = true + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" + } + x509 { + trust_store { + trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor_updated.pem") + } + trust_anchors { + pem_certificate = file("test-fixtures/intermediate_ca.pem") + } + } + } +} +`, context) +} + <% end -%> diff --git a/mmv1/third_party/terraform/services/iambeta/test-fixtures/intermediate_ca.pem b/mmv1/third_party/terraform/services/iambeta/test-fixtures/intermediate_ca.pem new file mode 100644 index 000000000000..e6c091b0c027 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/iambeta/test-fixtures/intermediate_ca.pem @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- +MIIDvjCCAqagAwIBAgIIXHIdYNfGCLMwDQYJKoZIhvcNAQELBQAwXDEXMBUGA1UEChMOR29vZ2xlIFRFU1RJTkcxHDAaBgNVBAsTE0dvb2dsZSBURVNUSU5HIHVuaXQxIzAhBgNVBAMMGkdvb2dsZSAqKlRlc3RpbmcqKiBSb290IENBMCAXDTIwMDEwMTAwMDAwMFoYDzIxMzAwMTAxMDAwMDAwWjBkMRcwFQYDVQQKEw5Hb29nbGUgVEVTVElORzEcMBoGA1UECxMTR29vZ2xlIFRFU1RJTkcgdW5pdDErMCkGA1UEAwwiR29vZ2xlICoqVGVzdGluZyoqIEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALVSyoOVAMfOYf10VxNwmRs/afgbkYJ5gPXzn3aetEtFrcqDiG8LdgniLhHcp3c/O1U0EKANMLWyMlHP0KC4wjMrYXS8doQ7B6kGXj070hRSHN7acF0ImSRw3i9idiiSgOIJzlbYXeh8NLqDAYESyWFht7RRdbCJx1v2U9T8F5QVeT96Dw3exSKQAIdL2J8ol9kgJNINimd7GxOQ3f1+vDOBiAtAj4zWCjjqdNqh5ivyrTu28J/umM35wtHS6iX6GvnwqTsRy8zS/KeN+Dq6PEk/j04mrOG3w82SFp+IvTNH6S/DxRyiE5yoAfgfunKotV34JVr/INH2RsEgbWtwK5ECAwEAAaN6MHgwDgYDVR0PAQH/BAQDAgIEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAPBgNVHRMBAf8EBTADAQH/MBkGA1UdDgQSBBB0QukRw23faHpDoa0xl+L+MBsGA1UdIwQUMBKAEMRB36Doz2ZXloHKRyZ8hN0wDQYJKoZIhvcNAQELBQADggEBAIPBiuirnZbv4JWDiAIXvCE5pbwej0aKFWEDV2Z8lY0RFPt1CXrDJJL91MXHZ1yviUSJINJErJn7wyGV2bm/N7DUpRE0g9IMgEank64UUl+OyQTXd0LIsjlqWA6Sj/hDZUdw6mi9a98ENUr6CiECtOxpGF9kj4G4WcnyvPP/phs1b8cAfQ+tPurrDRBAdeoQIj756QL7fvMijKNdG5KeCURu9L4BZmCeuy/3v2C2XjiFZHx4cZDOHJizrx04GqzV5PSXw5OYZiXfn5WPGMsyh7ufkQJKJcpv2t3M0gyc1omD0xWkxCl4dTdVef9HrboJnZkUrma509fpVL6F8I2Jkt8= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor.pem b/mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor.pem new file mode 100644 index 000000000000..76d69c7b0810 --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor.pem @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- 
+MIIDtjCCAp6gAwIBAgIIMu9pvsFWIUcwDQYJKoZIhvcNAQELBQAwXDEXMBUGA1UEChMOR29vZ2xlIFRFU1RJTkcxHDAaBgNVBAsTE0dvb2dsZSBURVNUSU5HIHVuaXQxIzAhBgNVBAMMGkdvb2dsZSAqKlRlc3RpbmcqKiBSb290IENBMCAXDTIwMDEwMTAwMDAwMFoYDzIxMzAwMTAxMDAwMDAwWjBcMRcwFQYDVQQKEw5Hb29nbGUgVEVTVElORzEcMBoGA1UECxMTR29vZ2xlIFRFU1RJTkcgdW5pdDEjMCEGA1UEAwwaR29vZ2xlICoqVGVzdGluZyoqIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHqdf17AA4iwl+HGtTD/qJJv9XZTjzjHiaRYWuGQ3iddf1LhYklLqHvpqOCBlhkg6NIZmoDPKicEi8pfGSzp6btcElyrr1ekECk5jEBcdl6tX/gTSYv7v1h9DkSJDHBAnoSsVW0/PNwI+YGEE2kizGMeg1moXlTHEB+yeGCik/+4eVRas/+wrlrTE5lMFMq8WhdnBx6udcc/BEauvlTybHaN3rJUVpVrJWeVoPGGtXR6MJrdbVScn9GYOLP6sXMPTr+pTY6ebrjs7K/wTVQqLJ1zArCvAEH8FvuAhiI19yM1uBRAds/fJbSUvFsEiIlDC4y7DAK7yWHUAjQIBV1xhDAgMBAAGjejB4MA4GA1UdDwEB/wQEAwICBDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAZBgNVHQ4EEgQQxEHfoOjPZleWgcpHJnyE3TAbBgNVHSMEFDASgBDEQd+g6M9mV5aBykcmfITdMA0GCSqGSIb3DQEBCwUAA4IBAQBcECwxgaK4ZgYO7guayK5QRpTb3Y6zpkmSOkj9h1+HF5Ch/o5FiweJi8k28h9Mfz//gKU2cXXWfXuY81CFEBstjw7jYt3+d4owS5sYKu5WswGj4jsQXRrt0tO0+0UngP560eggFCyLChG75lp54r92hTPk4fY4varURZH7X0jg9W7MO6E6/HmKdMIxanuWdkbpPe6kj5I7SNvVhqsncoga8iZJ6QK/rsC2LTax4dxUUSkP5vmwwiYbgXYbk9JcXI+OyVcMvVxN/akI9/JgYHOol6NdChTBwU0yjNb9B5TrgtP1/fs+LugINrN1R1hgVHDVlE4mwHej+5XL6m9xAxkc +-----END CERTIFICATE----- \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor_updated.pem b/mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor_updated.pem new file mode 100644 index 000000000000..ad85340983f4 --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/test-fixtures/trust_anchor_updated.pem @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- 
+MIIDtzCCAp+gAwIBAgIJAJbcoF4dvsxvMA0GCSqGSIb3DQEBCwUAMFwxFzAVBgNVBAoTDkdvb2dsZSBURVNUSU5HMRwwGgYDVQQLExNHb29nbGUgVEVTVElORyB1bml0MSMwIQYDVQQDDBpHb29nbGUgKipUZXN0aW5nKiogUm9vdCBDQTAgFw0yMDAxMDEwMDAwMDBaGA8yMTMwMDEwMTAwMDAwMFowXDEXMBUGA1UEChMOR29vZ2xlIFRFU1RJTkcxHDAaBgNVBAsTE0dvb2dsZSBURVNUSU5HIHVuaXQxIzAhBgNVBAMMGkdvb2dsZSAqKlRlc3RpbmcqKiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzVxH5w07E7O649oZHElZEyMGpYW1rS/yU58AYp9UgDjgNZchDCByn96uUdKiwzQRyFg+Nu/DifcpKbB8rzQtJoDqqpdPqmd2uxw13UNGeScsFGFoqIdOeU/l+SCAB1GvNHpMNoeFFzH9Ly1fFWRHXa/Upw1WYtKxMKM8pVLqodowTKuVMQHVMpVILhb1nlpZvsFR5i2LhG1U4jxVOVGC7OHTsUHNlFo547kM5MNnArl6vPx2LOZgA+2JeOM0zbiMk4DK+ks5eaXXapy5QMQ16PUIlq1oTgAYMRXk31TrmdMwu79FeIz3vHXgJnGhdmLqse3rE05cWuoEsuao01NpYwIDAQABo3oweDAOBgNVHQ8BAf8EBAMCAgQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wGQYDVR0OBBIEEMRB36Doz2ZXloHKRyZ8hN0wGwYDVR0jBBQwEoAQxEHfoOjPZleWgcpHJnyE3TANBgkqhkiG9w0BAQsFAAOCAQEAiim4OoNHHskwK1etk1Xswb+pB1OweUFD9iSHvkgnTw+RLuep+OYsKciz5GeZIWiKkWZMnJYTd41bX29fdIdcd1b3MjFVQ8jqKVkblb1NjYWLIwZshimtbIEZTDb/sxalQiwSH/SE2fi/8E8P8jxbntCeOyDw1/lce3tkYrJHlTNDZLNnr5Od+stpYi8EaPSnilEgLIMTsBwyaxUp8yWbxT8+M0JhJYmpHnSkC0vc2aLTYmgyv7URl14XIK2aumSISAuaCXYGj0hx8Wz0YNn1uydnZw7kmzn4ncZqfwoJuHJl/5kWq0nycgcieUNg65VxEhqHTnC6NXKjyBZKVXQaxQ== +-----END CERTIFICATE----- \ No newline at end of file From c4eadf3d3f8350f45c8b049212ca88645f18208b Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 5 Sep 2024 12:03:50 +0100 Subject: [PATCH 37/60] Use acceptance tests to test handling of the credentials provider configuration argument, add new data source that surfaces configuration of SDK and plugin-framework providers to facilitate acctests (#11599) --- .../terraform/acctest/vcr_utils.go | 15 + ...source_provider_config_plugin_framework.go | 231 +++++++++++++++ .../framework_provider_credentials_test.go | 270 ++++++++++++++++++ .../fwtransport/framework_config.go.erb | 11 + 
.../fwtransport/framework_config_test.go.erb | 161 ----------- .../fwtransport/go/framework_config_test.go | 161 ----------- .../data_source_provider_config_sdk.go | 164 +++++++++++ .../provider/provider_credentials_test.go | 269 +++++++++++++++++ .../provider/provider_internal_test.go | 162 ----------- .../provider/provider_mmv1_resources.go.erb | 1 - .../terraform/provider/provider_test.go.erb | 69 ----- 11 files changed, 960 insertions(+), 554 deletions(-) create mode 100644 mmv1/third_party/terraform/fwprovider/data_source_provider_config_plugin_framework.go create mode 100644 mmv1/third_party/terraform/fwprovider/framework_provider_credentials_test.go create mode 100644 mmv1/third_party/terraform/provider/data_source_provider_config_sdk.go create mode 100644 mmv1/third_party/terraform/provider/provider_credentials_test.go diff --git a/mmv1/third_party/terraform/acctest/vcr_utils.go b/mmv1/third_party/terraform/acctest/vcr_utils.go index f96ff0b3ee60..aed2036f55cb 100644 --- a/mmv1/third_party/terraform/acctest/vcr_utils.go +++ b/mmv1/third_party/terraform/acctest/vcr_utils.go @@ -31,6 +31,8 @@ import ( "github.com/dnaeon/go-vcr/recorder" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + fwDiags "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/types" @@ -436,6 +438,14 @@ func (p *frameworkTestProvider) Configure(ctx context.Context, req provider.Conf } } +// DataSources overrides the provider's DataSources function so that we can append test-specific data sources to the list of data sources on the provider. 
+// This makes the data source(s) usable only in the context of acctests, and isn't available to users +func (p *frameworkTestProvider) DataSources(ctx context.Context) []func() datasource.DataSource { + ds := p.FrameworkProvider.DataSources(ctx) + ds = append(ds, fwprovider.NewGoogleProviderConfigPluginFrameworkDataSource) // google_provider_config_plugin_framework + return ds +} + func configureApiClient(ctx context.Context, p *fwprovider.FrameworkProvider, diags *fwDiags.Diagnostics) { var data fwmodels.ProviderModel var d fwDiags.Diagnostics @@ -453,6 +463,11 @@ func configureApiClient(ctx context.Context, p *fwprovider.FrameworkProvider, di // GetSDKProvider gets the SDK provider with an overwritten configure function to be called by MuxedProviders func GetSDKProvider(testName string) *schema.Provider { prov := tpgprovider.Provider() + + // Append a test-specific data source to the list of data sources on the provider + // This makes the data source(s) usable only in the context of acctests, and isn't available to users + prov.DataSourcesMap["google_provider_config_sdk"] = tpgprovider.DataSourceGoogleProviderConfigSdk() + if IsVcrEnabled() { old := prov.ConfigureContextFunc prov.ConfigureContextFunc = func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { diff --git a/mmv1/third_party/terraform/fwprovider/data_source_provider_config_plugin_framework.go b/mmv1/third_party/terraform/fwprovider/data_source_provider_config_plugin_framework.go new file mode 100644 index 000000000000..8aa78227fd30 --- /dev/null +++ b/mmv1/third_party/terraform/fwprovider/data_source_provider_config_plugin_framework.go @@ -0,0 +1,231 @@ +package fwprovider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the data source satisfies the expected interfaces. +var ( + _ datasource.DataSource = &GoogleProviderConfigPluginFrameworkDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleProviderConfigPluginFrameworkDataSource{} + _ fwresource.LocationDescriber = &GoogleProviderConfigPluginFrameworkModel{} +) + +func NewGoogleProviderConfigPluginFrameworkDataSource() datasource.DataSource { + return &GoogleProviderConfigPluginFrameworkDataSource{} +} + +type GoogleProviderConfigPluginFrameworkDataSource struct { + providerConfig *fwtransport.FrameworkProviderConfig +} + +type GoogleProviderConfigPluginFrameworkModel struct { + // Currently this reflects the FrameworkProviderConfig struct and ProviderModel in google/fwmodels/provider_model.go + // which means it uses the plugin-framework type system where values can be explicitly Null or Unknown. + // + // As part of future muxing fixes/refactoring we'll change this struct to reflect structs used in the SDK code, and will move to + // using the SDK type system. 
+ Credentials types.String `tfsdk:"credentials"` + AccessToken types.String `tfsdk:"access_token"` + ImpersonateServiceAccount types.String `tfsdk:"impersonate_service_account"` + ImpersonateServiceAccountDelegates types.List `tfsdk:"impersonate_service_account_delegates"` + Project types.String `tfsdk:"project"` + BillingProject types.String `tfsdk:"billing_project"` + Region types.String `tfsdk:"region"` + Zone types.String `tfsdk:"zone"` + Scopes types.List `tfsdk:"scopes"` + // omit Batching + UserProjectOverride types.Bool `tfsdk:"user_project_override"` + RequestTimeout types.String `tfsdk:"request_timeout"` + RequestReason types.String `tfsdk:"request_reason"` + UniverseDomain types.String `tfsdk:"universe_domain"` + DefaultLabels types.Map `tfsdk:"default_labels"` + AddTerraformAttributionLabel types.Bool `tfsdk:"add_terraform_attribution_label"` + TerraformAttributionLabelAdditionStrategy types.String `tfsdk:"terraform_attribution_label_addition_strategy"` +} + +func (m *GoogleProviderConfigPluginFrameworkModel) GetLocationDescription(providerConfig *fwtransport.FrameworkProviderConfig) fwresource.LocationDescription { + return fwresource.LocationDescription{ + RegionSchemaField: types.StringValue("region"), + ZoneSchemaField: types.StringValue("zone"), + ProviderRegion: providerConfig.Region, + ProviderZone: providerConfig.Zone, + } +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_provider_config_plugin_framework" +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + + resp.Schema = schema.Schema{ + + Description: "Use this data source to access the configuration of the Google Cloud provider. 
This data source is implemented with the SDK.", + MarkdownDescription: "Use this data source to access the configuration of the Google Cloud provider. This data source is implemented with the SDK.", + Attributes: map[string]schema.Attribute{ + // Start of user inputs + "access_token": schema.StringAttribute{ + Description: "The access_token argument used to configure the provider", + MarkdownDescription: "The access_token argument used to configure the provider", + Computed: true, + Sensitive: true, + }, + "credentials": schema.StringAttribute{ + Description: "The credentials argument used to configure the provider", + MarkdownDescription: "The credentials argument used to configure the provider", + Computed: true, + Sensitive: true, + }, + "impersonate_service_account": schema.StringAttribute{ + Description: "The impersonate_service_account argument used to configure the provider", + MarkdownDescription: "The impersonate_service_account argument used to configure the provider.", + Computed: true, + }, + "impersonate_service_account_delegates": schema.ListAttribute{ + ElementType: types.StringType, + Description: "The impersonate_service_account_delegates argument used to configure the provider", + MarkdownDescription: "The impersonate_service_account_delegates argument used to configure the provider.", + Computed: true, + }, + "project": schema.StringAttribute{ + Description: "The project argument used to configure the provider", + MarkdownDescription: "The project argument used to configure the provider.", + Computed: true, + }, + "region": schema.StringAttribute{ + Description: "The region argument used to configure the provider.", + MarkdownDescription: "The region argument used to configure the provider.", + Computed: true, + }, + "billing_project": schema.StringAttribute{ + Description: "The billing_project argument used to configure the provider.", + MarkdownDescription: "The billing_project argument used to configure the provider.", + Computed: true, + }, + 
"zone": schema.StringAttribute{ + Description: "The zone argument used to configure the provider.", + MarkdownDescription: "The zone argument used to configure the provider.", + Computed: true, + }, + "universe_domain": schema.StringAttribute{ + Description: "The universe_domain argument used to configure the provider.", + MarkdownDescription: "The universe_domain argument used to configure the provider.", + Computed: true, + }, + "scopes": schema.ListAttribute{ + ElementType: types.StringType, + Description: "The scopes argument used to configure the provider.", + MarkdownDescription: "The scopes argument used to configure the provider.", + Computed: true, + }, + "user_project_override": schema.BoolAttribute{ + Description: "The user_project_override argument used to configure the provider.", + MarkdownDescription: "The user_project_override argument used to configure the provider.", + Computed: true, + }, + "request_reason": schema.StringAttribute{ + Description: "The request_reason argument used to configure the provider.", + MarkdownDescription: "The request_reason argument used to configure the provider.", + Computed: true, + }, + "request_timeout": schema.StringAttribute{ + Description: "The request_timeout argument used to configure the provider.", + MarkdownDescription: "The request_timeout argument used to configure the provider.", + Computed: true, + }, + "default_labels": schema.MapAttribute{ + ElementType: types.StringType, + Description: "The default_labels argument used to configure the provider.", + MarkdownDescription: "The default_labels argument used to configure the provider.", + Computed: true, + }, + "add_terraform_attribution_label": schema.BoolAttribute{ + Description: "The add_terraform_attribution_label argument used to configure the provider.", + MarkdownDescription: "The add_terraform_attribution_label argument used to configure the provider.", + Computed: true, + }, + "terraform_attribution_label_addition_strategy": 
schema.StringAttribute{ + Description: "The terraform_attribution_label_addition_strategy argument used to configure the provider.", + MarkdownDescription: "The terraform_attribution_label_addition_strategy argument used to configure the provider.", + Computed: true, + }, + // End of user inputs + + // Note - this data source excludes the default and custom endpoints for individual services + }, + } +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + // Required for accessing project, region, zone and tokenSource + d.providerConfig = p +} + +func (d *GoogleProviderConfigPluginFrameworkDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleProviderConfigPluginFrameworkModel + var metaData *fwmodels.ProviderMetaModel + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Copy all values from the provider config into this data source + + data.Credentials = d.providerConfig.Credentials + // TODO(SarahFrench) - access_token + // TODO(SarahFrench) - impersonate_service_account + // TODO(SarahFrench) - impersonate_service_account_delegates + data.Project = d.providerConfig.Project + data.Region = d.providerConfig.Region + data.BillingProject = d.providerConfig.BillingProject + data.Zone = d.providerConfig.Zone + data.UniverseDomain = d.providerConfig.UniverseDomain + data.Scopes = d.providerConfig.Scopes + data.UserProjectOverride = d.providerConfig.UserProjectOverride + // TODO(SarahFrench) - request_reason + // TODO(SarahFrench) - request_timeout + data.DefaultLabels = d.providerConfig.DefaultLabels + // TODO(SarahFrench) - add_terraform_attribution_label + // TODO(SarahFrench) - terraform_attribution_label_addition_strategy + + // Warn users against using this data source + resp.Diagnostics.Append(diag.NewWarningDiagnostic( + "Data source google_provider_config_plugin_framework should not be used", + "Data source google_provider_config_plugin_framework is intended to be used only in acceptance tests for the provider. Instead, please use the google_client_config data source to access provider configuration details, or open a GitHub issue requesting new features in that datasource. Please go to: https://github.com/hashicorp/terraform-provider-google/issues/new/choose", + )) + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} diff --git a/mmv1/third_party/terraform/fwprovider/framework_provider_credentials_test.go b/mmv1/third_party/terraform/fwprovider/framework_provider_credentials_test.go new file mode 100644 index 000000000000..1203fd51f1e9 --- /dev/null +++ b/mmv1/third_party/terraform/fwprovider/framework_provider_credentials_test.go @@ -0,0 +1,270 @@ +package fwprovider_test + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// TestAccFwProvider_credentials is a series of acc tests asserting how the plugin-framework provider handles credentials arguments +// It is PF specific because the HCL used uses a PF-implemented data source +// It is a counterpart to TestAccSdkProvider_credentials +func TestAccFwProvider_credentials(t *testing.T) { + testCases := map[string]func(t *testing.T){ + "credentials can be configured as a path to a credentials JSON file": testAccFwProvider_credentials_validJsonFilePath, + "configuring credentials as a path to a non-existent file results in an error": testAccFwProvider_credentials_badJsonFilepathCausesError, + "config takes precedence over environment variables": testAccFwProvider_credentials_configPrecedenceOverEnvironmentVariables, + "when credentials is unset in the config, environment variables are used in a given order": testAccFwProvider_credentials_precedenceOrderEnvironmentVariables, // GOOGLE_CREDENTIALS, GOOGLE_CLOUD_KEYFILE_JSON, GCLOUD_KEYFILE_JSON, GOOGLE_APPLICATION_CREDENTIALS + "when credentials is set to an empty string in the config the value isn't ignored and results in an error": testAccFwProvider_credentials_emptyStringValidation, + } + + for name, tc := range testCases { + // shadow the tc variable into scope so that when + // the loop continues, if t.Run 
hasn't executed tc(t) + // yet, we don't have a race condition + // see https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } +} + +func testAccFwProvider_credentials_validJsonFilePath(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + + // unset all credentials env vars + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, "") + } + + credentials := transport_tpg.TestFakeCredentialsPath + + context := map[string]interface{}{ + "credentials": credentials, + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Credentials set as what we expect + Config: testAccFwProvider_credentialsInProviderBlock(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_plugin_framework.default", "credentials", credentials), + ), + }, + }, + }) +} + +func testAccFwProvider_credentials_badJsonFilepathCausesError(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + + // unset all credentials env vars + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, "") + } + + pathToMissingFile := "./this/path/does/not/exist.json" // Doesn't exist + + context := map[string]interface{}{ + "credentials": pathToMissingFile, + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Apply-time error due to the file not existing + Config: testAccFwProvider_credentialsInProviderBlock(context), + PlanOnly: true, + ExpectError: regexp.MustCompile("JSON credentials are not valid"), + }, + }, + }) +} + +func testAccFwProvider_credentials_configPrecedenceOverEnvironmentVariables(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact 
with API + + credentials := envvar.GetTestCredsFromEnv() + + // ensure all possible credentials env vars set; show they aren't used + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, credentials) + } + + pathToMissingFile := "./this/path/does/not/exist.json" // Doesn't exist + + context := map[string]interface{}{ + "credentials": pathToMissingFile, + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Apply-time error; bad value in config is used over of good values in ENVs + Config: testAccFwProvider_credentialsInProviderBlock(context), + PlanOnly: true, + ExpectError: regexp.MustCompile("JSON credentials are not valid"), + }, + }, + }) +} + +func testAccFwProvider_credentials_precedenceOrderEnvironmentVariables(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + /* + These are all the ENVs for credentials, and they are in order of precedence. 
+ GOOGLE_CREDENTIALS + GOOGLE_CLOUD_KEYFILE_JSON + GCLOUD_KEYFILE_JSON + GOOGLE_APPLICATION_CREDENTIALS + GOOGLE_USE_DEFAULT_CREDENTIALS + */ + + GOOGLE_CREDENTIALS := acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS") + GOOGLE_CLOUD_KEYFILE_JSON := acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON") + GCLOUD_KEYFILE_JSON := acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON") + GOOGLE_APPLICATION_CREDENTIALS := "./fake/file/path/nonexistent/a/credentials.json" // GOOGLE_APPLICATION_CREDENTIALS needs to be a path, not JSON + + context := map[string]interface{}{} + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // GOOGLE_CREDENTIALS is used 1st if set + PreConfig: func() { + t.Setenv("GOOGLE_CREDENTIALS", GOOGLE_CREDENTIALS) //used + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", GOOGLE_CLOUD_KEYFILE_JSON) + t.Setenv("GCLOUD_KEYFILE_JSON", GCLOUD_KEYFILE_JSON) + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) + }, + Config: testAccFwProvider_credentialsInEnvsOnly(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_plugin_framework.default", "credentials", GOOGLE_CREDENTIALS), + ), + }, + { + // GOOGLE_CLOUD_KEYFILE_JSON is used 2nd + PreConfig: func() { + // unset + t.Setenv("GOOGLE_CREDENTIALS", "") + // set + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", GOOGLE_CLOUD_KEYFILE_JSON) //used + t.Setenv("GCLOUD_KEYFILE_JSON", GCLOUD_KEYFILE_JSON) + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) + + }, + Config: testAccFwProvider_credentialsInEnvsOnly(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_plugin_framework.default", "credentials", GOOGLE_CLOUD_KEYFILE_JSON), + ), + }, + { + // GOOGLE_CLOUD_KEYFILE_JSON is used 3rd + PreConfig: 
func() { + // unset + t.Setenv("GOOGLE_CREDENTIALS", "") + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", "") + // set + t.Setenv("GCLOUD_KEYFILE_JSON", GCLOUD_KEYFILE_JSON) //used + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) + }, + Config: testAccFwProvider_credentialsInEnvsOnly(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_plugin_framework.default", "credentials", GCLOUD_KEYFILE_JSON), + ), + }, + { + // GOOGLE_APPLICATION_CREDENTIALS is used 4th + PreConfig: func() { + // unset + t.Setenv("GOOGLE_CREDENTIALS", "") + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", "") + t.Setenv("GCLOUD_KEYFILE_JSON", "") + // set + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) //used + }, + Config: testAccFwProvider_credentialsInEnvsOnly(context), + ExpectError: regexp.MustCompile("no such file or directory"), + }, + // Need a step to help post-test destroy run without error from GOOGLE_APPLICATION_CREDENTIALS + { + PreConfig: func() { + t.Setenv("GOOGLE_CREDENTIALS", GOOGLE_CREDENTIALS) + }, + Config: "// Need a step to help post-test destroy run without error", + }, + }, + }) +} + +func testAccFwProvider_credentials_emptyStringValidation(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + + validValue := acctest.GenerateFakeCredentialsJson("usable-json-for-this-test") + + // ensure all credentials env vars set + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, validValue) + } + + context := map[string]interface{}{ + "credentials": "", // empty string used + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccFwProvider_credentialsInProviderBlock(context), + PlanOnly: true, + ExpectError: regexp.MustCompile("expected a non-empty string"), + }, + }, + }) +} + +// 
testAccFwProvider_credentialsInProviderBlock allows setting the credentials argument in a provider block. +// This function uses data.google_provider_config_plugin_framework because it is implemented with the plugin-framework, +// and it should be replaced with another plugin framework-implemented datasource or resource in future +func testAccFwProvider_credentialsInProviderBlock(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + credentials = "%{credentials}" +} + +data "google_provider_config_plugin_framework" "default" {} + +output "credentials" { + value = data.google_provider_config_plugin_framework.default.credentials + sensitive = true +} +`, context) +} + +// testAccFwProvider_credentialsInEnvsOnly allows testing when the credentials argument +// is only supplied via ENVs +func testAccFwProvider_credentialsInEnvsOnly(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_provider_config_plugin_framework" "default" {} + +output "credentials" { + value = data.google_provider_config_plugin_framework.default.credentials + sensitive = true +} +`, context) +} diff --git a/mmv1/third_party/terraform/fwtransport/framework_config.go.erb b/mmv1/third_party/terraform/fwtransport/framework_config.go.erb index 29ac4ca7ae18..e7168256c909 100644 --- a/mmv1/third_party/terraform/fwtransport/framework_config.go.erb +++ b/mmv1/third_party/terraform/fwtransport/framework_config.go.erb @@ -33,6 +33,11 @@ import ( ) type FrameworkProviderConfig struct { + // Temporary, as we'll replace use of FrameworkProviderConfig with transport_tpg.Config soon + // transport_tpg.Config has a Credentials field, hence this change is needed + Credentials types.String + // End temporary + BillingProject types.String Client *http.Client Context context.Context @@ -97,6 +102,12 @@ func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, p.<%= product[:definitions].name -%>BasePath = data.<%= 
product[:definitions].name -%>CustomEndpoint.ValueString() <% end -%> + // Temporary + p.Credentials = data.Credentials + // End temporary + + // Copy values from the ProviderModel struct containing data about the provider configuration (present only when responsing to ConfigureProvider rpc calls) + // to the FrameworkProviderConfig struct that will be passed and available to all resources/data sources p.Context = ctx p.BillingProject = data.BillingProject p.DefaultLabels = data.DefaultLabels diff --git a/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb b/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb index 4f0f39ba1194..79411167d3a3 100644 --- a/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb +++ b/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb @@ -177,167 +177,6 @@ func TestFrameworkProvider_LoadAndValidateFramework_project(t *testing.T) { } } -func TestFrameworkProvider_LoadAndValidateFramework_credentials(t *testing.T) { - - // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value - // this is to stop the code under test experiencing errors, and could be addressed in future refactoring. 
- // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ - - const pathToMissingFile string = "./this/path/doesnt/exist.json" // Doesn't exist - - cases := map[string]struct { - ConfigValues fwmodels.ProviderModel - EnvVariables map[string]string - ExpectedDataModelValue basetypes.StringValue - // ExpectedConfigStructValue not used here, as credentials info isn't stored in the config struct - ExpectError bool - }{ - "credentials can be configured as a path to a credentials JSON file": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(transport_tpg.TestFakeCredentialsPath), - }, - ExpectedDataModelValue: types.StringValue(transport_tpg.TestFakeCredentialsPath), - }, - "configuring credentials as a path to a non-existent file results in an error": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(pathToMissingFile), - }, - ExpectError: true, - }, - "credentials set in the config are not overridden by environment variables": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(acctest.GenerateFakeCredentialsJson("test")), - }, - EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("test")), - }, - "when credentials is unset in the config, environment variables are used: GOOGLE_CREDENTIALS used first": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - 
"GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS")), - }, - "when credentials is unset in the config, environment variables are used: GOOGLE_CLOUD_KEYFILE_JSON used second": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - // GOOGLE_CREDENTIALS not set - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON")), - }, - "when credentials is unset in the config, environment variables are used: GCLOUD_KEYFILE_JSON used third": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - // GOOGLE_CREDENTIALS not set - // GOOGLE_CLOUD_KEYFILE_JSON not set - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON")), - }, - "when credentials is unset in the config (and access_token unset), GOOGLE_APPLICATION_CREDENTIALS is used for auth but not to set values in the config": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - // 
GOOGLE_CREDENTIALS not set - // GOOGLE_CLOUD_KEYFILE_JSON not set - // GCLOUD_KEYFILE_JSON not set - "GOOGLE_APPLICATION_CREDENTIALS": transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used by code - }, - ExpectedDataModelValue: types.StringNull(), - }, - // Error states - "when credentials is set to an empty string in the config the value isn't ignored and results in an error": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(""), - }, - EnvVariables: map[string]string{ - "GOOGLE_APPLICATION_CREDENTIALS": transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used by code - }, - ExpectError: true, - }, - // NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset - // See https://cloud.google.com/docs/authentication/application-default-credentials#search_order - // Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs - // "error returned if credentials is set as an empty string and GOOGLE_APPLICATION_CREDENTIALS is unset": { - // ConfigValues: fwmodels.ProviderModel{ - // Credentials: types.StringValue(""), - // }, - // EnvVariables: map[string]string{ - // "GOOGLE_APPLICATION_CREDENTIALS": "", - // }, - // ExpectError: true, - // }, - // "error returned if neither credentials nor access_token set in the provider config, and GOOGLE_APPLICATION_CREDENTIALS is unset": { - // EnvVariables: map[string]string{ - // "GOOGLE_APPLICATION_CREDENTIALS": "", - // }, - // ExpectError: true, - // }, - // Handling unknown values - see separate `TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown` test - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - - // Arrange - acctest.UnsetTestProviderConfigEnvs(t) - acctest.SetupTestEnvs(t, tc.EnvVariables) - - ctx := context.Background() - tfVersion := "foobar" - 
providerversion := "999" - diags := diag.Diagnostics{} - - data := tc.ConfigValues - impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list - data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates - - p := fwtransport.FrameworkProviderConfig{} - - // Act - p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) - - // Assert - if diags.HasError() && tc.ExpectError { - return - } - if diags.HasError() && !tc.ExpectError { - for i, err := range diags.Errors() { - num := i + 1 - t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) - } - t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) - } - if tc.ExpectError && !diags.HasError() { - t.Fatalf("expected error, but no errors occurred") - } - if !data.Credentials.Equal(tc.ExpectedDataModelValue) { - t.Fatalf("want credentials to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.Credentials.String()) - } - // fwtransport.FrameworkProviderConfig does not store the credentials info, so test does not make assertions on config struct - }) - } -} - // NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset // See https://cloud.google.com/docs/authentication/application-default-credentials#search_order // Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs diff --git a/mmv1/third_party/terraform/fwtransport/go/framework_config_test.go b/mmv1/third_party/terraform/fwtransport/go/framework_config_test.go index fc7fbc8248b4..d4e34eae78bb 100644 --- a/mmv1/third_party/terraform/fwtransport/go/framework_config_test.go +++ b/mmv1/third_party/terraform/fwtransport/go/framework_config_test.go @@ -176,167 +176,6 @@ func TestFrameworkProvider_LoadAndValidateFramework_project(t *testing.T) { } } -func 
TestFrameworkProvider_LoadAndValidateFramework_credentials(t *testing.T) { - - // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value - // this is to stop the code under test experiencing errors, and could be addressed in future refactoring. - // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ - - const pathToMissingFile string = "./this/path/doesnt/exist.json" // Doesn't exist - - cases := map[string]struct { - ConfigValues fwmodels.ProviderModel - EnvVariables map[string]string - ExpectedDataModelValue basetypes.StringValue - // ExpectedConfigStructValue not used here, as credentials info isn't stored in the config struct - ExpectError bool - }{ - "credentials can be configured as a path to a credentials JSON file": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(transport_tpg.TestFakeCredentialsPath), - }, - ExpectedDataModelValue: types.StringValue(transport_tpg.TestFakeCredentialsPath), - }, - "configuring credentials as a path to a non-existent file results in an error": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(pathToMissingFile), - }, - ExpectError: true, - }, - "credentials set in the config are not overridden by environment variables": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(acctest.GenerateFakeCredentialsJson("test")), - }, - EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("test")), - }, - "when credentials is unset in the 
config, environment variables are used: GOOGLE_CREDENTIALS used first": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS")), - }, - "when credentials is unset in the config, environment variables are used: GOOGLE_CLOUD_KEYFILE_JSON used second": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - // GOOGLE_CREDENTIALS not set - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON")), - }, - "when credentials is unset in the config, environment variables are used: GCLOUD_KEYFILE_JSON used third": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - // GOOGLE_CREDENTIALS not set - // GOOGLE_CLOUD_KEYFILE_JSON not set - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON")), - }, - "when 
credentials is unset in the config (and access_token unset), GOOGLE_APPLICATION_CREDENTIALS is used for auth but not to set values in the config": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringNull(), // unset - }, - EnvVariables: map[string]string{ - // GOOGLE_CREDENTIALS not set - // GOOGLE_CLOUD_KEYFILE_JSON not set - // GCLOUD_KEYFILE_JSON not set - "GOOGLE_APPLICATION_CREDENTIALS": transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used by code - }, - ExpectedDataModelValue: types.StringNull(), - }, - // Error states - "when credentials is set to an empty string in the config the value isn't ignored and results in an error": { - ConfigValues: fwmodels.ProviderModel{ - Credentials: types.StringValue(""), - }, - EnvVariables: map[string]string{ - "GOOGLE_APPLICATION_CREDENTIALS": transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used by code - }, - ExpectError: true, - }, - // NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset - // See https://cloud.google.com/docs/authentication/application-default-credentials#search_order - // Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs - // "error returned if credentials is set as an empty string and GOOGLE_APPLICATION_CREDENTIALS is unset": { - // ConfigValues: fwmodels.ProviderModel{ - // Credentials: types.StringValue(""), - // }, - // EnvVariables: map[string]string{ - // "GOOGLE_APPLICATION_CREDENTIALS": "", - // }, - // ExpectError: true, - // }, - // "error returned if neither credentials nor access_token set in the provider config, and GOOGLE_APPLICATION_CREDENTIALS is unset": { - // EnvVariables: map[string]string{ - // "GOOGLE_APPLICATION_CREDENTIALS": "", - // }, - // ExpectError: true, - // }, - // Handling unknown values - see separate 
`TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown` test - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - - // Arrange - acctest.UnsetTestProviderConfigEnvs(t) - acctest.SetupTestEnvs(t, tc.EnvVariables) - - ctx := context.Background() - tfVersion := "foobar" - providerversion := "999" - diags := diag.Diagnostics{} - - data := tc.ConfigValues - impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list - data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates - - p := fwtransport.FrameworkProviderConfig{} - - // Act - p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) - - // Assert - if diags.HasError() && tc.ExpectError { - return - } - if diags.HasError() && !tc.ExpectError { - for i, err := range diags.Errors() { - num := i + 1 - t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) - } - t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) - } - if tc.ExpectError && !diags.HasError() { - t.Fatalf("expected error, but no errors occurred") - } - if !data.Credentials.Equal(tc.ExpectedDataModelValue) { - t.Fatalf("want credentials to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.Credentials.String()) - } - // fwtransport.FrameworkProviderConfig does not store the credentials info, so test does not make assertions on config struct - }) - } -} - // NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset // See https://cloud.google.com/docs/authentication/application-default-credentials#search_order // Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs diff --git a/mmv1/third_party/terraform/provider/data_source_provider_config_sdk.go b/mmv1/third_party/terraform/provider/data_source_provider_config_sdk.go new 
file mode 100644 index 000000000000..005ea6f36a08 --- /dev/null +++ b/mmv1/third_party/terraform/provider/data_source_provider_config_sdk.go @@ -0,0 +1,164 @@ +package provider + +import ( + "crypto/sha1" + "encoding/base64" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleProviderConfigSdk() *schema.Resource { + return &schema.Resource{ + DeprecationMessage: "Data source google_provider_config_sdk is intended to be used only in acceptance tests for the provider. Instead, please use the google_client_config data source to access provider configuration details, or open a GitHub issue requesting new features in that datasource. Please go to: https://github.com/hashicorp/terraform-provider-google/issues/new/choose", + Read: dataSourceClientConfigRead, + Schema: map[string]*schema.Schema{ + // Start of user inputs + "access_token": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "credentials": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "impersonate_service_account": { + Type: schema.TypeString, + Computed: true, + }, + "impersonate_service_account_delegates": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + }, + "billing_project": { + Type: schema.TypeString, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + }, + "universe_domain": { + Type: schema.TypeString, + Computed: true, + }, + "scopes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "user_project_override": { + Type: schema.TypeBool, + Computed: true, + }, + "request_reason": { + Type: schema.TypeString, + Computed: true, + }, + "request_timeout": { + Type: 
schema.TypeString, + Computed: true, + }, + "default_labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "add_terraform_attribution_label": { + Type: schema.TypeBool, + Computed: true, + }, + "terraform_attribution_label_addition_strategy": { + Type: schema.TypeString, + Computed: true, + }, + // End of user inputs + + // Note - this data source excludes the default and custom endpoints for individual services + + // Start of values set during provider configuration + "user_agent": { + Type: schema.TypeString, + Computed: true, + }, + // End of values set during provider configuration + }, + } +} + +func dataSourceClientConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + if err := d.Set("access_token", config.AccessToken); err != nil { + return fmt.Errorf("error setting access_token: %s", err) + } + if err := d.Set("credentials", config.Credentials); err != nil { + return fmt.Errorf("error setting credentials: %s", err) + } + if err := d.Set("impersonate_service_account", config.ImpersonateServiceAccount); err != nil { + return fmt.Errorf("error setting impersonate_service_account: %s", err) + } + if err := d.Set("impersonate_service_account_delegates", config.ImpersonateServiceAccountDelegates); err != nil { + return fmt.Errorf("error setting impersonate_service_account_delegates: %s", err) + } + if err := d.Set("project", config.Project); err != nil { + return fmt.Errorf("error setting project: %s", err) + } + if err := d.Set("region", config.Region); err != nil { + return fmt.Errorf("error setting region: %s", err) + } + if err := d.Set("billing_project", config.BillingProject); err != nil { + return fmt.Errorf("error setting billing_project: %s", err) + } + if err := d.Set("zone", config.Zone); err != nil { + return fmt.Errorf("error setting zone: %s", err) + } + if err := d.Set("universe_domain", config.UniverseDomain); err != nil { + return 
fmt.Errorf("error setting universe_domain: %s", err) + } + if err := d.Set("scopes", config.Scopes); err != nil { + return fmt.Errorf("error setting scopes: %s", err) + } + if err := d.Set("user_project_override", config.UserProjectOverride); err != nil { + return fmt.Errorf("error setting user_project_override: %s", err) + } + if err := d.Set("request_reason", config.RequestReason); err != nil { + return fmt.Errorf("error setting request_reason: %s", err) + } + if err := d.Set("request_timeout", config.RequestTimeout.String()); err != nil { + return fmt.Errorf("error setting request_timeout: %s", err) + } + if err := d.Set("default_labels", config.DefaultLabels); err != nil { + return fmt.Errorf("error setting default_labels: %s", err) + } + if err := d.Set("add_terraform_attribution_label", config.AddTerraformAttributionLabel); err != nil { + return fmt.Errorf("error setting add_terraform_attribution_label: %s", err) + } + if err := d.Set("terraform_attribution_label_addition_strategy", config.TerraformAttributionLabelAdditionStrategy); err != nil { + return fmt.Errorf("error setting terraform_attribution_label_addition_strategy: %s", err) + } + if err := d.Set("user_agent", config.UserAgent); err != nil { + return fmt.Errorf("error setting user_agent: %s", err) + } + + // Id is a hash of the total transport.Config struct + configString := []byte(fmt.Sprintf("%#v", config)) + hasher := sha1.New() + hasher.Write(configString) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + d.SetId(string(sha)) + + return nil +} diff --git a/mmv1/third_party/terraform/provider/provider_credentials_test.go b/mmv1/third_party/terraform/provider/provider_credentials_test.go new file mode 100644 index 000000000000..a7e6e51d8114 --- /dev/null +++ b/mmv1/third_party/terraform/provider/provider_credentials_test.go @@ -0,0 +1,269 @@ +package provider_test + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// TestAccSdkProvider_credentials is a series of acc tests asserting how the SDK provider handles credentials arguments +// It is SDK specific because the HCL used provisions SDK-implemented resources +// It is a counterpart to TestAccFwProvider_credentials +func TestAccSdkProvider_credentials(t *testing.T) { + testCases := map[string]func(t *testing.T){ + "credentials can be configured as a path to a credentials JSON file": testAccSdkProvider_credentials_validJsonFilePath, + "configuring credentials as a path to a non-existent file results in an error": testAccSdkProvider_credentials_badJsonFilepathCausesError, + "config takes precedence over environment variables": testAccSdkProvider_credentials_configPrecedenceOverEnvironmentVariables, + "when credentials is unset in the config, environment variables are used in a given order": testAccSdkProvider_credentials_precedenceOrderEnvironmentVariables, // GOOGLE_CREDENTIALS, GOOGLE_CLOUD_KEYFILE_JSON, GCLOUD_KEYFILE_JSON, GOOGLE_APPLICATION_CREDENTIALS + "when credentials is set to an empty string in the config the value isn't ignored and results in an error": testAccSdkProvider_credentials_emptyStringValidation, + } + + for name, tc := range testCases { + // shadow the tc variable into scope so that when + // the loop continues, if t.Run hasn't executed tc(t) + // yet, we don't have a race condition + // see https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } +} + +func testAccSdkProvider_credentials_validJsonFilePath(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + + // unset all credentials env vars + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, "") + } + + 
credentials := transport_tpg.TestFakeCredentialsPath + + context := map[string]interface{}{ + "credentials": credentials, + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Credentials set as what we expect + Config: testAccSdkProvider_credentialsInProviderBlock(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_sdk.default", "credentials", credentials), + ), + }, + }, + }) +} + +func testAccSdkProvider_credentials_badJsonFilepathCausesError(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + + // unset all credentials env vars + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, "") + } + + pathToMissingFile := "./this/path/does/not/exist.json" // Doesn't exist + + context := map[string]interface{}{ + "credentials": pathToMissingFile, + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Apply-time error due to the file not existing + Config: testAccSdkProvider_credentialsInProviderBlock(context), + PlanOnly: true, + ExpectError: regexp.MustCompile("JSON credentials are not valid"), + }, + }, + }) +} + +func testAccSdkProvider_credentials_configPrecedenceOverEnvironmentVariables(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + + credentials := envvar.GetTestCredsFromEnv() + + // ensure all possible credentials env vars set; show they aren't used + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, credentials) + } + + pathToMissingFile := "./this/path/does/not/exist.json" // Doesn't exist + + context := map[string]interface{}{ + "credentials": pathToMissingFile, + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Apply-time error; bad value in config is used over of good values in ENVs + Config: testAccSdkProvider_credentialsInProviderBlock(context), + PlanOnly: true, + ExpectError: regexp.MustCompile("JSON credentials are not valid"), + }, + }, + }) +} + +func testAccSdkProvider_credentials_precedenceOrderEnvironmentVariables(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + /* + These are all the ENVs for credentials, and they are in order of precedence. + GOOGLE_CREDENTIALS + GOOGLE_CLOUD_KEYFILE_JSON + GCLOUD_KEYFILE_JSON + GOOGLE_APPLICATION_CREDENTIALS + GOOGLE_USE_DEFAULT_CREDENTIALS + */ + + GOOGLE_CREDENTIALS := acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS") + GOOGLE_CLOUD_KEYFILE_JSON := acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON") + GCLOUD_KEYFILE_JSON := acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON") + GOOGLE_APPLICATION_CREDENTIALS := "./fake/file/path/nonexistent/a/credentials.json" // GOOGLE_APPLICATION_CREDENTIALS needs to be a path, not JSON + + context := map[string]interface{}{} + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // GOOGLE_CREDENTIALS is used 1st if set + PreConfig: func() { + t.Setenv("GOOGLE_CREDENTIALS", GOOGLE_CREDENTIALS) //used + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", GOOGLE_CLOUD_KEYFILE_JSON) + t.Setenv("GCLOUD_KEYFILE_JSON", GCLOUD_KEYFILE_JSON) + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) + }, + Config: testAccSdkProvider_credentialsInEnvsOnly(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_sdk.default", "credentials", GOOGLE_CREDENTIALS), + ), + }, + { + // GOOGLE_CLOUD_KEYFILE_JSON is used 2nd + PreConfig: func() { + // unset + t.Setenv("GOOGLE_CREDENTIALS", 
"") + // set + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", GOOGLE_CLOUD_KEYFILE_JSON) //used + t.Setenv("GCLOUD_KEYFILE_JSON", GCLOUD_KEYFILE_JSON) + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) + + }, + Config: testAccSdkProvider_credentialsInEnvsOnly(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_sdk.default", "credentials", GOOGLE_CLOUD_KEYFILE_JSON), + ), + }, + { + // GOOGLE_CLOUD_KEYFILE_JSON is used 3rd + PreConfig: func() { + // unset + t.Setenv("GOOGLE_CREDENTIALS", "") + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", "") + // set + t.Setenv("GCLOUD_KEYFILE_JSON", GCLOUD_KEYFILE_JSON) //used + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) + }, + Config: testAccSdkProvider_credentialsInEnvsOnly(context), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.google_provider_config_sdk.default", "credentials", GCLOUD_KEYFILE_JSON), + ), + }, + { + // GOOGLE_APPLICATION_CREDENTIALS is used 4th + PreConfig: func() { + // unset + t.Setenv("GOOGLE_CREDENTIALS", "") + t.Setenv("GOOGLE_CLOUD_KEYFILE_JSON", "") + t.Setenv("GCLOUD_KEYFILE_JSON", "") + // set + t.Setenv("GOOGLE_APPLICATION_CREDENTIALS", GOOGLE_APPLICATION_CREDENTIALS) //used + }, + Config: testAccSdkProvider_credentialsInEnvsOnly(context), + ExpectError: regexp.MustCompile("no such file or directory"), + }, + // Need a step to help post-test destroy run without error from GOOGLE_APPLICATION_CREDENTIALS + { + PreConfig: func() { + t.Setenv("GOOGLE_CREDENTIALS", GOOGLE_CREDENTIALS) + }, + Config: "// Need a step to help post-test destroy run without error", + }, + }, + }) +} + +func testAccSdkProvider_credentials_emptyStringValidation(t *testing.T) { + acctest.SkipIfVcr(t) // Test doesn't interact with API + + credentials := envvar.GetTestCredsFromEnv() + + // ensure all credentials env vars set + for _, v := range envvar.CredsEnvVars { + t.Setenv(v, 
credentials) + } + + context := map[string]interface{}{ + "credentials": "", // empty string used + } + + acctest.VcrTest(t, resource.TestCase{ + // No PreCheck for checking ENVs + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccSdkProvider_credentialsInProviderBlock(context), + PlanOnly: true, + ExpectError: regexp.MustCompile("expected a non-empty string"), + }, + }, + }) +} + +// testAccSdkProvider_credentialsInProviderBlock allows setting the credentials argument in a provider block. +// This function uses data.google_provider_config_sdk because it is implemented with the SDKv2 +func testAccSdkProvider_credentialsInProviderBlock(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + credentials = "%{credentials}" +} + +data "google_provider_config_sdk" "default" {} + +output "credentials" { + value = data.google_provider_config_sdk.default.credentials + sensitive = true +} +`, context) +} + +// testAccSdkProvider_credentialsInEnvsOnly allows testing when the credentials argument +// is only supplied via ENVs +func testAccSdkProvider_credentialsInEnvsOnly(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_provider_config_sdk" "default" {} + +output "credentials" { + value = data.google_provider_config_sdk.default.credentials + sensitive = true +} +`, context) +} diff --git a/mmv1/third_party/terraform/provider/provider_internal_test.go b/mmv1/third_party/terraform/provider/provider_internal_test.go index 5bf88ff03983..4b10ff1c55a6 100644 --- a/mmv1/third_party/terraform/provider/provider_internal_test.go +++ b/mmv1/third_party/terraform/provider/provider_internal_test.go @@ -135,168 +135,6 @@ func TestProvider_ValidateEmptyStrings(t *testing.T) { } } -func TestProvider_ProviderConfigure_credentials(t *testing.T) { - - const pathToMissingFile string = "./this/path/doesnt/exist.json" // Doesn't exist - - cases := map[string]struct { 
- ConfigValues map[string]interface{} - EnvVariables map[string]string - ExpectError bool - ExpectFieldUnset bool - ExpectedSchemaValue string - ExpectedConfigValue string - }{ - "credentials can be configured as a path to a credentials JSON file": { - ConfigValues: map[string]interface{}{ - "credentials": transport_tpg.TestFakeCredentialsPath, - }, - EnvVariables: map[string]string{}, - ExpectedSchemaValue: transport_tpg.TestFakeCredentialsPath, - ExpectedConfigValue: transport_tpg.TestFakeCredentialsPath, - }, - "configuring credentials as a path to a non-existant file results in an error": { - ConfigValues: map[string]interface{}{ - "credentials": pathToMissingFile, - }, - ExpectError: true, - ExpectedSchemaValue: pathToMissingFile, - ExpectedConfigValue: pathToMissingFile, - }, - "credentials set in the config are not overridden by environment variables": { - ConfigValues: map[string]interface{}{ - "credentials": acctest.GenerateFakeCredentialsJson("test"), - }, - EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedSchemaValue: acctest.GenerateFakeCredentialsJson("test"), - ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("test"), - }, - "when credentials is unset in the config, environment variables are used: GOOGLE_CREDENTIALS used first": { - EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": 
acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedSchemaValue: "", - ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - }, - "when credentials is unset in the config, environment variables are used: GOOGLE_CLOUD_KEYFILE_JSON used second": { - EnvVariables: map[string]string{ - // GOOGLE_CREDENTIALS not set - "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedSchemaValue: "", - ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - }, - "when credentials is unset in the config, environment variables are used: GCLOUD_KEYFILE_JSON used third": { - EnvVariables: map[string]string{ - // GOOGLE_CREDENTIALS not set - // GOOGLE_CLOUD_KEYFILE_JSON not set - "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), - }, - ExpectedSchemaValue: "", - ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - }, - "when credentials is unset in the config (and access_token unset), GOOGLE_APPLICATION_CREDENTIALS is used for auth but not to set values in the config": { - EnvVariables: map[string]string{ - "GOOGLE_APPLICATION_CREDENTIALS": transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used - }, - ExpectFieldUnset: true, - ExpectedSchemaValue: "", - }, - // Handling empty strings in config - "when credentials is set to an empty string in the config (and access_token unset), GOOGLE_APPLICATION_CREDENTIALS is used": { - ConfigValues: map[string]interface{}{ - "credentials": "", - }, - EnvVariables: map[string]string{ - "GOOGLE_APPLICATION_CREDENTIALS": 
transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used - }, - ExpectFieldUnset: true, - ExpectedSchemaValue: "", - }, - // Error states - // NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset - // See https://cloud.google.com/docs/authentication/application-default-credentials#search_order - // Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs - // "error returned if credentials is set as an empty string and GOOGLE_APPLICATION_CREDENTIALS is unset": { - // ConfigValues: map[string]interface{}{ - // "credentials": "", - // }, - // EnvVariables: map[string]string{ - // "GOOGLE_APPLICATION_CREDENTIALS": "", - // }, - // ExpectError: true, - // }, - // "error returned if neither credentials nor access_token set in the provider config, and GOOGLE_APPLICATION_CREDENTIALS is unset": { - // EnvVariables: map[string]string{ - // "GOOGLE_APPLICATION_CREDENTIALS": "", - // }, - // ExpectError: true, - // }, - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - - // Arrange - ctx := context.Background() - acctest.UnsetTestProviderConfigEnvs(t) - acctest.SetupTestEnvs(t, tc.EnvVariables) - p := provider.Provider() - d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) - - // Act - c, diags := provider.ProviderConfigure(ctx, d, p) - - // Assert - if diags.HasError() && !tc.ExpectError { - t.Fatalf("unexpected error(s): %#v", diags) - } - if !diags.HasError() && tc.ExpectError { - t.Fatal("expected error(s) but got none") - } - if diags.HasError() && tc.ExpectError { - v, ok := d.GetOk("credentials") - if ok { - val := v.(string) - if val != tc.ExpectedSchemaValue { - t.Fatalf("expected credentials value set in provider config data to be %s, got %s", tc.ExpectedSchemaValue, val) - } - if tc.ExpectFieldUnset { - t.Fatalf("expected credentials 
value to not be set in provider config data, got %s", val) - } - } - // Return early in tests where errors expected - return - } - - config := c.(*transport_tpg.Config) // Should be non-nil value, as test cases reaching this point experienced no errors - - v, ok := d.GetOk("credentials") - val := v.(string) - if ok && tc.ExpectFieldUnset { - t.Fatal("expected credentials value to be unset in provider config data") - } - if v != tc.ExpectedSchemaValue { - t.Fatalf("expected credentials value set in provider config data to be %s, got %s", tc.ExpectedSchemaValue, val) - } - if config.Credentials != tc.ExpectedConfigValue { - t.Fatalf("expected credentials value set in Config struct to be to be %s, got %s", tc.ExpectedConfigValue, config.Credentials) - } - }) - } -} - func TestProvider_ProviderConfigure_accessToken(t *testing.T) { cases := map[string]struct { diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 561b463593d3..283b39390f99 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -231,7 +231,6 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), "google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), "google_vmwareengine_vcenter_credentials": vmwareengine.DataSourceVmwareengineVcenterCredentials(), - // ####### END handwritten datasources ########### } diff --git a/mmv1/third_party/terraform/provider/provider_test.go.erb b/mmv1/third_party/terraform/provider/provider_test.go.erb index 3295e541aac5..17ee8883ef85 100644 --- a/mmv1/third_party/terraform/provider/provider_test.go.erb +++ b/mmv1/third_party/terraform/provider/provider_test.go.erb @@ -181,75 +181,6 @@ func TestAccProviderIndirectUserProjectOverride(t *testing.T) { }) } -func 
TestAccProviderCredentialsEmptyString(t *testing.T) { - // Test is not parallel because ENVs are set. - // Need to skip VCR as this test downloads providers from the Terraform Registry - acctest.SkipIfVcr(t) - - creds := envvar.GetTestCredsFromEnv() - project := envvar.GetTestProjectFromEnv() - t.Setenv("GOOGLE_CREDENTIALS", creds) - t.Setenv("GOOGLE_PROJECT", project) - - pid := "tf-test-" + acctest.RandString(t, 10) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - // No TestDestroy since that's not really the point of this test - Steps: []resource.TestStep{ - { - // This is a control for the other test steps; the provider block doesn't contain `credentials = ""` - Config: testAccProviderCredentials_actWithCredsFromEnv(pid), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - PlanOnly: true, - ExpectNonEmptyPlan: true, - }, - { - // Assert that errors are expected with credentials when - // - GOOGLE_CREDENTIALS is set - // - provider block has credentials = "" - // - TPG v4.60.2 is used - // Context: this was an addidental breaking change introduced with muxing - Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), - ExternalProviders: map[string]resource.ExternalProvider{ - "google": { - VersionConstraint: "4.60.2", - Source: "hashicorp/google", - }, - }, - PlanOnly: true, - ExpectNonEmptyPlan: true, - ExpectError: regexp.MustCompile(`unexpected end of JSON input`), - }, - { - // Assert that errors are NOT expected with credentials when - // - GOOGLE_CREDENTIALS is set - // - provider block has credentials = "" - // - TPG v4.84.0 is used - // Context: this was the fix for the unintended breaking change in 4.60.2 - Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), - ExternalProviders: map[string]resource.ExternalProvider{ - "google": { - VersionConstraint: "4.84.0", - Source: "hashicorp/google", - }, - }, - PlanOnly: true, - ExpectNonEmptyPlan: true, - }, - { - 
// Validation errors are expected in 5.0.0+ - // Context: we intentionally introduced the breaking change again in 5.0.0+ - Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - PlanOnly: true, - ExpectNonEmptyPlan: true, - ExpectError: regexp.MustCompile(`expected a non-empty string`), - }, - }, - }) -} - func TestAccProviderEmptyStrings(t *testing.T) { t.Parallel() From 51a9f532e2f43b668a3a553723a5c2adfe6fbad4 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 5 Sep 2024 05:20:28 -0700 Subject: [PATCH 38/60] Go rewrite handwritten provider files (#11638) --- mmv1/products/kms/go_AutokeyConfig.yaml | 7 + .../products/siteverification/go_product.yaml | 20 + mmv1/provider/terraform.go | 2 +- mmv1/template-converter.go | 9 - .../go/kms_autokey_config.go.tmpl | 1 + .../go/cloudrunv2_service_mesh.tf.tmpl | 29 + .../go/cloudrunv2_service_mount_gcs.tf.tmpl | 2 +- .../go/cloudrunv2_service_mount_nfs.tf.tmpl | 1 - .../go/kms_autokey_config_all.tf.tmpl | 9 +- .../go/kms_autokey_config_folder.go.tmpl | 2 + .../go/kms_autokey_config_folder.go.tmpl | 4 + .../go/kms_autokey_config_folder.go.tmpl | 1 + .../go/kms_autokey_config_folder.go.tmpl | 1 + .../go/kms_autokey_config_folder.go.tmpl | 1 + .../fwmodels/go/provider_model.go.tmpl | 74 + .../fwprovider/go/framework_provider.go.tmpl | 297 ++++ .../fwtransport/go/framework_config.go.tmpl | 715 +++++++++ mmv1/third_party/terraform/go.mod.erb | 2 +- mmv1/third_party/terraform/go/go.mod | 4 +- .../terraform/provider/go/provider.go.tmpl | 416 +++++ .../go/provider_mmv1_resources.go.tmpl | 457 ++++++ .../resource_cloud_run_service_test.go.tmpl | 87 +- .../go/resource_cloud_run_v2_job_test.go.tmpl | 15 +- ...resource_cloud_run_v2_service_test.go.tmpl | 125 +- .../go/resource_compute_router_peer.go.tmpl | 4 +- .../services/container/go/node_config.go.tmpl | 1 - .../storage/go/resource_storage_bucket.go | 1 + .../sweeper/go/gcp_sweeper_test.go.tmpl | 
30 + .../terraform/transport/go/config.go.tmpl | 1354 +++++++++++++++++ 29 files changed, 3623 insertions(+), 48 deletions(-) create mode 100644 mmv1/products/siteverification/go_product.yaml create mode 100644 mmv1/templates/terraform/examples/go/cloudrunv2_service_mesh.tf.tmpl create mode 100644 mmv1/templates/terraform/post_create/go/kms_autokey_config_folder.go.tmpl create mode 100644 mmv1/templates/terraform/pre_create/go/kms_autokey_config_folder.go.tmpl create mode 100644 mmv1/templates/terraform/pre_delete/go/kms_autokey_config_folder.go.tmpl create mode 100644 mmv1/templates/terraform/pre_read/go/kms_autokey_config_folder.go.tmpl create mode 100644 mmv1/templates/terraform/pre_update/go/kms_autokey_config_folder.go.tmpl create mode 100644 mmv1/third_party/terraform/fwmodels/go/provider_model.go.tmpl create mode 100644 mmv1/third_party/terraform/fwprovider/go/framework_provider.go.tmpl create mode 100644 mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl create mode 100644 mmv1/third_party/terraform/provider/go/provider.go.tmpl create mode 100644 mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl create mode 100644 mmv1/third_party/terraform/sweeper/go/gcp_sweeper_test.go.tmpl create mode 100644 mmv1/third_party/terraform/transport/go/config.go.tmpl diff --git a/mmv1/products/kms/go_AutokeyConfig.yaml b/mmv1/products/kms/go_AutokeyConfig.yaml index a7445c3ef986..5059d518a9a6 100644 --- a/mmv1/products/kms/go_AutokeyConfig.yaml +++ b/mmv1/products/kms/go_AutokeyConfig.yaml @@ -44,7 +44,13 @@ timeouts: update_minutes: 20 delete_minutes: 20 custom_code: + constants: 'templates/terraform/constants/go/autokey_config_folder_diff.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/kms_autokey_config_folder.go.tmpl' + pre_read: 'templates/terraform/pre_read/go/kms_autokey_config_folder.go.tmpl' + pre_update: 'templates/terraform/pre_update/go/kms_autokey_config_folder.go.tmpl' + pre_delete: 
'templates/terraform/pre_delete/go/kms_autokey_config_folder.go.tmpl' test_check_destroy: 'templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl' +skip_sweeper: true examples: - name: 'kms_autokey_config_all' primary_resource_id: 'example-autokeyconfig' @@ -66,6 +72,7 @@ parameters: url_param_only: true required: true immutable: true + diff_suppress_func: 'folderPrefixSuppress' properties: - name: 'keyProject' type: String diff --git a/mmv1/products/siteverification/go_product.yaml b/mmv1/products/siteverification/go_product.yaml new file mode 100644 index 000000000000..e95ef3c8ac01 --- /dev/null +++ b/mmv1/products/siteverification/go_product.yaml @@ -0,0 +1,20 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +name: 'SiteVerification' +display_name: 'Site Verification' +versions: + - name: 'ga' + base_url: 'https://www.googleapis.com/siteVerification/v1/' +scopes: + - 'https://www.googleapis.com/auth/siteverification' diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 834df4869501..860aeee9bd9c 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -898,7 +898,7 @@ func (t *Terraform) generateResourcesForVersion(products []*api.Product) { t.IAMResourceCount += 3 if !(iamPolicy.MinVersion != "" && iamPolicy.MinVersion < t.TargetVersionName) { - iamClassName = fmt.Sprintf("%s.Resource%s", service, object.ResourceName()) + iamClassName = fmt.Sprintf("%s.%s", service, object.ResourceName()) } } diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index 98575247656d..a2ea36d1fbf1 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -701,15 +701,6 @@ func checkExceptionList(filePath string) bool { "iam/example_config_body/privateca", "iam/example_config_body/vertex_ai", "iam/example_config_body/app_engine_", - - // TODO: remove the following files from the exception list after all of the services are migrated to Go - // It will generate diffs when partial services are migrated. 
- "provider/provider_mmv1_resources.go.erb", - "provider/provider.go.erb", - "fwmodels/provider_model.go.erb", - "fwprovider/framework_provider.go.erb", - "fwtransport/framework_config.go.erb", - "sweeper/gcp_sweeper_test.go.erb", "transport/config.go.erb", } diff --git a/mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl b/mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl index ad28e276546e..8e801cb0449f 100644 --- a/mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl +++ b/mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl @@ -1,6 +1,7 @@ config := acctest.GoogleProviderConfig(t) url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}KMSBasePath{{"}}"}}folders/{{"{{"}}folder{{"}}"}}/autokeyConfig") +url = strings.Replace(url, "folders/folders/", "folders/", 1) if err != nil { return err } diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_service_mesh.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_service_mesh.tf.tmpl new file mode 100644 index 000000000000..2cd98fd20a06 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_service_mesh.tf.tmpl @@ -0,0 +1,29 @@ +resource "google_cloud_run_v2_service" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "cloud_run_service_name"}}" + depends_on = [time_sleep.wait_for_mesh] + deletion_protection = false + + location = "us-central1" + launch_stage = "BETA" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + service_mesh { + mesh = google_network_services_mesh.mesh.id + } + } +} + +resource "time_sleep" "wait_for_mesh" { + depends_on = [google_network_services_mesh.mesh] + + create_duration = "1m" +} + +resource "google_network_services_mesh" "mesh" { + provider = google-beta + name = "{{index $.Vars "mesh_name"}}" +} diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_gcs.tf.tmpl 
b/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_gcs.tf.tmpl index 8a2b39996a0d..f3001a8f896c 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_gcs.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_gcs.tf.tmpl @@ -3,7 +3,7 @@ resource "google_cloud_run_v2_service" "{{$.PrimaryResourceId}}" { location = "us-central1" deletion_protection = false - launch_stage = "BETA" + template { execution_environment = "EXECUTION_ENVIRONMENT_GEN2" diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_nfs.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_nfs.tf.tmpl index fa77b3569c35..46907ece2e15 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_nfs.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_service_mount_nfs.tf.tmpl @@ -4,7 +4,6 @@ resource "google_cloud_run_v2_service" "{{$.PrimaryResourceId}}" { location = "us-central1" deletion_protection = false ingress = "INGRESS_TRAFFIC_ALL" - launch_stage = "BETA" template { execution_environment = "EXECUTION_ENVIRONMENT_GEN2" diff --git a/mmv1/templates/terraform/examples/go/kms_autokey_config_all.tf.tmpl b/mmv1/templates/terraform/examples/go/kms_autokey_config_all.tf.tmpl index 1dfef71cbf3f..aa3ad3661cc6 100644 --- a/mmv1/templates/terraform/examples/go/kms_autokey_config_all.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/kms_autokey_config_all.tf.tmpl @@ -64,7 +64,14 @@ resource "time_sleep" "wait_srv_acc_permissions" { resource "google_kms_autokey_config" "{{$.PrimaryResourceId}}" { provider = google-beta - folder = google_folder.autokms_folder.folder_id + folder = google_folder.autokms_folder.id key_project = "projects/${google_project.key_project.project_id}" depends_on = [time_sleep.wait_srv_acc_permissions] } + +# Wait delay after setting AutokeyConfig, to prevent diffs on reapply, +# because setting the config takes a little to fully propagate. 
+resource "time_sleep" "wait_autokey_propagation" { + create_duration = "30s" + depends_on = [google_kms_autokey_config.{{$.PrimaryResourceId}}] +} diff --git a/mmv1/templates/terraform/post_create/go/kms_autokey_config_folder.go.tmpl b/mmv1/templates/terraform/post_create/go/kms_autokey_config_folder.go.tmpl new file mode 100644 index 000000000000..7404b3c78e93 --- /dev/null +++ b/mmv1/templates/terraform/post_create/go/kms_autokey_config_folder.go.tmpl @@ -0,0 +1,2 @@ +id = strings.Replace(id, "folders/folders/", "folders/", 1) +d.SetId(id) diff --git a/mmv1/templates/terraform/pre_create/go/kms_autokey_config_folder.go.tmpl b/mmv1/templates/terraform/pre_create/go/kms_autokey_config_folder.go.tmpl new file mode 100644 index 000000000000..ffbef5e2eb69 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/kms_autokey_config_folder.go.tmpl @@ -0,0 +1,4 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) +folderValue := d.Get("folder").(string) +folderValue = strings.Replace(folderValue, "folders/", "", 1) +d.Set("folder", folderValue) diff --git a/mmv1/templates/terraform/pre_delete/go/kms_autokey_config_folder.go.tmpl b/mmv1/templates/terraform/pre_delete/go/kms_autokey_config_folder.go.tmpl new file mode 100644 index 000000000000..db8c46af1299 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/go/kms_autokey_config_folder.go.tmpl @@ -0,0 +1 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) diff --git a/mmv1/templates/terraform/pre_read/go/kms_autokey_config_folder.go.tmpl b/mmv1/templates/terraform/pre_read/go/kms_autokey_config_folder.go.tmpl new file mode 100644 index 000000000000..db8c46af1299 --- /dev/null +++ b/mmv1/templates/terraform/pre_read/go/kms_autokey_config_folder.go.tmpl @@ -0,0 +1 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) diff --git a/mmv1/templates/terraform/pre_update/go/kms_autokey_config_folder.go.tmpl b/mmv1/templates/terraform/pre_update/go/kms_autokey_config_folder.go.tmpl new 
file mode 100644 index 000000000000..db8c46af1299 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/kms_autokey_config_folder.go.tmpl @@ -0,0 +1 @@ +url = strings.Replace(url, "folders/folders/", "folders/", 1) diff --git a/mmv1/third_party/terraform/fwmodels/go/provider_model.go.tmpl b/mmv1/third_party/terraform/fwmodels/go/provider_model.go.tmpl new file mode 100644 index 000000000000..252fccb53d54 --- /dev/null +++ b/mmv1/third_party/terraform/fwmodels/go/provider_model.go.tmpl @@ -0,0 +1,74 @@ +package fwmodels + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// ProviderModel describes the provider config data model. +type ProviderModel struct { + Credentials types.String `tfsdk:"credentials"` + AccessToken types.String `tfsdk:"access_token"` + ImpersonateServiceAccount types.String `tfsdk:"impersonate_service_account"` + ImpersonateServiceAccountDelegates types.List `tfsdk:"impersonate_service_account_delegates"` + Project types.String `tfsdk:"project"` + BillingProject types.String `tfsdk:"billing_project"` + Region types.String `tfsdk:"region"` + Zone types.String `tfsdk:"zone"` + Scopes types.List `tfsdk:"scopes"` + Batching types.List `tfsdk:"batching"` + UserProjectOverride types.Bool `tfsdk:"user_project_override"` + RequestTimeout types.String `tfsdk:"request_timeout"` + RequestReason types.String `tfsdk:"request_reason"` + UniverseDomain types.String `tfsdk:"universe_domain"` + DefaultLabels types.Map `tfsdk:"default_labels"` + AddTerraformAttributionLabel types.Bool `tfsdk:"add_terraform_attribution_label"` + TerraformAttributionLabelAdditionStrategy types.String `tfsdk:"terraform_attribution_label_addition_strategy"` + + // Generated Products +{{- range $product := $.Products }} + {{ $product.Name }}CustomEndpoint types.String `tfsdk:"{{ underscore $product.Name }}_custom_endpoint"` +{{- end }} + + // Handwritten Products / Versioned / Atypical Entries + 
CloudBillingCustomEndpoint types.String `tfsdk:"cloud_billing_custom_endpoint"` + ContainerCustomEndpoint types.String `tfsdk:"container_custom_endpoint"` + DataflowCustomEndpoint types.String `tfsdk:"dataflow_custom_endpoint"` + IamCredentialsCustomEndpoint types.String `tfsdk:"iam_credentials_custom_endpoint"` + ResourceManagerV3CustomEndpoint types.String `tfsdk:"resource_manager_v3_custom_endpoint"` +{{- if ne $.TargetVersionName "ga" }} + RuntimeconfigCustomEndpoint types.String `tfsdk:"runtimeconfig_custom_endpoint"` +{{- end }} + IAMCustomEndpoint types.String `tfsdk:"iam_custom_endpoint"` + TagsLocationCustomEndpoint types.String `tfsdk:"tags_location_custom_endpoint"` + + // dcl + ContainerAwsCustomEndpoint types.String `tfsdk:"container_aws_custom_endpoint"` + ContainerAzureCustomEndpoint types.String `tfsdk:"container_azure_custom_endpoint"` + + // dcl generated + ApikeysCustomEndpoint types.String `tfsdk:"apikeys_custom_endpoint"` + AssuredWorkloadsCustomEndpoint types.String `tfsdk:"assured_workloads_custom_endpoint"` + CloudBuildWorkerPoolCustomEndpoint types.String `tfsdk:"cloud_build_worker_pool_custom_endpoint"` + CloudResourceManagerCustomEndpoint types.String `tfsdk:"cloud_resource_manager_custom_endpoint"` + EventarcCustomEndpoint types.String `tfsdk:"eventarc_custom_endpoint"` + FirebaserulesCustomEndpoint types.String `tfsdk:"firebaserules_custom_endpoint"` + RecaptchaEnterpriseCustomEndpoint types.String `tfsdk:"recaptcha_enterprise_custom_endpoint"` + + GkehubFeatureCustomEndpoint types.String `tfsdk:"gkehub_feature_custom_endpoint"` +} + +type ProviderBatching struct { + SendAfter types.String `tfsdk:"send_after"` + EnableBatching types.Bool `tfsdk:"enable_batching"` +} + +var ProviderBatchingAttributes = map[string]attr.Type{ + "send_after": types.StringType, + "enable_batching": types.BoolType, +} + +// ProviderMetaModel describes the provider meta model +type ProviderMetaModel struct { + ModuleName types.String `tfsdk:"module_name"` +} 
diff --git a/mmv1/third_party/terraform/fwprovider/go/framework_provider.go.tmpl b/mmv1/third_party/terraform/fwprovider/go/framework_provider.go.tmpl new file mode 100644 index 000000000000..470a492f3c84 --- /dev/null +++ b/mmv1/third_party/terraform/fwprovider/go/framework_provider.go.tmpl @@ -0,0 +1,297 @@ +package fwprovider + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/function" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/metaschema" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-provider-google/google/functions" + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" + "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + "github.com/hashicorp/terraform-provider-google/version" + {{- if ne $.TargetVersionName "ga" }} + "github.com/hashicorp/terraform-provider-google/google/services/firebase" + {{- end }} + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ provider.ProviderWithMetaSchema = &FrameworkProvider{} + _ provider.ProviderWithFunctions = &FrameworkProvider{} +) + +// New is a helper function to simplify provider server and testing implementation. 
+func New() provider.ProviderWithMetaSchema { + return &FrameworkProvider{ + Version: version.ProviderVersion, + } +} + +// FrameworkProvider is the provider implementation. +type FrameworkProvider struct { + fwtransport.FrameworkProviderConfig + Version string +} + +// Metadata returns the provider type name. +func (p *FrameworkProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "google" + resp.Version = p.Version +} + +// MetaSchema returns the provider meta schema. +func (p *FrameworkProvider) MetaSchema(_ context.Context, _ provider.MetaSchemaRequest, resp *provider.MetaSchemaResponse) { + resp.Schema = metaschema.Schema{ + Attributes: map[string]metaschema.Attribute{ + "module_name": metaschema.StringAttribute{ + Optional: true, + }, + }, + } +} + +// Schema defines the provider-level schema for configuration data. +func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "credentials": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot("access_token"), + }...), + CredentialsValidator(), + NonEmptyStringValidator(), + }, + }, + "access_token": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot("credentials"), + }...), + NonEmptyStringValidator(), + }, + }, + "impersonate_service_account": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + NonEmptyStringValidator(), + }, + }, + "impersonate_service_account_delegates": schema.ListAttribute{ + Optional: true, + ElementType: types.StringType, + }, + "project": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + NonEmptyStringValidator(), + }, + }, + "billing_project": 
schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + NonEmptyStringValidator(), + }, + }, + "region": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + NonEmptyStringValidator(), + }, + }, + "zone": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + NonEmptyStringValidator(), + }, + }, + "scopes": schema.ListAttribute{ + Optional: true, + ElementType: types.StringType, + }, + "user_project_override": schema.BoolAttribute{ + Optional: true, + }, + "request_timeout": schema.StringAttribute{ + Optional: true, + }, + "request_reason": schema.StringAttribute{ + Optional: true, + }, + "universe_domain": schema.StringAttribute{ + Optional: true, + }, + "default_labels": schema.MapAttribute{ + Optional: true, + ElementType: types.StringType, + }, + "add_terraform_attribution_label": schema.BoolAttribute{ + Optional: true, + }, + "terraform_attribution_label_addition_strategy": schema.StringAttribute{ + Optional: true, + }, + // Generated Products + {{- range $product := $.Products }} + "{{ underscore $product.Name }}_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + {{- end }} + + // Handwritten Products / Versioned / Atypical Entries + "cloud_billing_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "container_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "dataflow_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "iam_credentials_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + 
"resource_manager_v3_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "runtimeconfig_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + {{- end }} + "iam_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "tags_location_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + + // dcl + "container_aws_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + "container_azure_custom_endpoint": &schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + transport_tpg.CustomEndpointValidator(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "batching": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "send_after": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + NonNegativeDurationValidator(), + }, + }, + "enable_batching": schema.BoolAttribute{ + Optional: true, + }, + }, + }, + }, + }, + } + + transport_tpg.ConfigureDCLCustomEndpointAttributesFramework(&resp.Schema) +} + +// Configure prepares an API client for data sources and resources. +func (p *FrameworkProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + var data fwmodels.ProviderModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + // Configuration values are now available. 
+ p.LoadAndValidateFramework(ctx, &data, req.TerraformVersion, &resp.Diagnostics, p.Version) + if resp.Diagnostics.HasError() { + return + } + + // This is how we make provider configuration info (configured clients, default project, etc) available to resources and data sources + // implemented using the plugin-framework. The resources' Configure functions receive this data in the ConfigureRequest argument. + resp.DataSourceData = &p.FrameworkProviderConfig + resp.ResourceData = &p.FrameworkProviderConfig +} + + +// DataSources defines the data sources implemented in the provider. +func (p *FrameworkProvider) DataSources(_ context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + resourcemanager.NewGoogleClientConfigDataSource, + resourcemanager.NewGoogleClientOpenIDUserinfoDataSource, + {{- if ne $.TargetVersionName "ga" }} + firebase.NewGoogleFirebaseAndroidAppConfigDataSource, + firebase.NewGoogleFirebaseAppleAppConfigDataSource, + firebase.NewGoogleFirebaseWebAppConfigDataSource, + {{- end }} + } +} + +// Resources defines the resources implemented in the provider. +func (p *FrameworkProvider) Resources(_ context.Context) []func() resource.Resource { + return nil +} + +// Functions defines the provider functions implemented in the provider. 
+func (p *FrameworkProvider) Functions(_ context.Context) []func() function.Function { + return []func() function.Function{ + functions.NewLocationFromIdFunction, + functions.NewNameFromIdFunction, + functions.NewProjectFromIdFunction, + functions.NewRegionFromIdFunction, + functions.NewRegionFromZoneFunction, + functions.NewZoneFromIdFunction, + } +} diff --git a/mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl b/mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl new file mode 100644 index 000000000000..d148d7e0300f --- /dev/null +++ b/mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl @@ -0,0 +1,715 @@ +package fwtransport + +import ( + "context" + "fmt" + "net/http" + "os" + "regexp" + "strconv" + "time" + + "golang.org/x/oauth2" + googleoauth "golang.org/x/oauth2/google" + + "google.golang.org/api/option" + "google.golang.org/api/transport" + "google.golang.org/grpc" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" + "github.com/sirupsen/logrus" +) + +type FrameworkProviderConfig struct { + BillingProject types.String + Client *http.Client + Context context.Context + gRPCLoggingOptions []option.ClientOption + PollInterval time.Duration + Project types.String + Region types.String + Zone types.String + RequestBatcherIam *transport_tpg.RequestBatcher + RequestBatcherServiceUsage *transport_tpg.RequestBatcher + Scopes types.List + TokenSource 
oauth2.TokenSource + UniverseDomain types.String + UserAgent string + UserProjectOverride types.Bool + DefaultLabels types.Map + + // paths for client setup + {{- range $product := $.Products }} + {{ $product.Name }}BasePath string + {{- end }} +} + +// LoadAndValidateFramework handles the bulk of configuring the provider +// it is pulled out so that we can manually call this from our testing provider as well +func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, data *fwmodels.ProviderModel, tfVersion string, diags *diag.Diagnostics, providerversion string) { + + // Set defaults if needed + p.HandleDefaults(ctx, data, diags) + if diags.HasError() { + return + } + + p.Context = ctx + + // Handle User Agent string + p.UserAgent = CompileUserAgentString(ctx, "terraform-provider-google{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}", tfVersion, providerversion) + // opt in extension for adding to the User-Agent header + if ext := os.Getenv("GOOGLE_TERRAFORM_USERAGENT_EXTENSION"); ext != "" { + ua := p.UserAgent + p.UserAgent = fmt.Sprintf("%s %s", ua, ext) + } + + // Set up client configuration + p.SetupClient(ctx, *data, diags) + if diags.HasError() { + return + } + + // gRPC Logging setup + p.SetupGrpcLogging() + + // Handle Batching Config + batchingConfig := GetBatchingConfig(ctx, data.Batching, diags) + if diags.HasError() { + return + } + + // Setup Base Paths for clients + // Generated products + {{- range $product := $.Products }} + p.{{ $product.Name }}BasePath = data.{{ $product.Name }}CustomEndpoint.ValueString() + {{- end }} + + p.Context = ctx + p.BillingProject = data.BillingProject + p.DefaultLabels = data.DefaultLabels + p.Project = data.Project + p.Region = GetRegionFromRegionSelfLink(data.Region) + p.Scopes = data.Scopes + p.Zone = data.Zone + p.UserProjectOverride = data.UserProjectOverride + p.PollInterval = 10 * time.Second + p.UniverseDomain = data.UniverseDomain + p.RequestBatcherServiceUsage = 
transport_tpg.NewRequestBatcher("Service Usage", ctx, batchingConfig) + p.RequestBatcherIam = transport_tpg.NewRequestBatcher("IAM", ctx, batchingConfig) +} + +// HandleDefaults will handle all the defaults necessary in the provider +func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmodels.ProviderModel, diags *diag.Diagnostics) { + if (data.AccessToken.IsNull() || data.AccessToken.IsUnknown()) && (data.Credentials.IsNull() || data.Credentials.IsUnknown()) { + credentials := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + }, nil) + + if credentials != nil { + data.Credentials = types.StringValue(credentials.(string)) + } + + accessToken := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN", + }, nil) + + if accessToken != nil { + data.AccessToken = types.StringValue(accessToken.(string)) + } + } + + if (data.ImpersonateServiceAccount.IsNull() || data.ImpersonateServiceAccount.IsUnknown()) && os.Getenv("GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") != "" { + data.ImpersonateServiceAccount = types.StringValue(os.Getenv("GOOGLE_IMPERSONATE_SERVICE_ACCOUNT")) + } + + if data.Project.IsNull() || data.Project.IsUnknown() { + project := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_PROJECT", + "GOOGLE_CLOUD_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }, nil) + if project != nil { + data.Project = types.StringValue(project.(string)) + } + } + + if data.BillingProject.IsNull() && os.Getenv("GOOGLE_BILLING_PROJECT") != "" { + data.BillingProject = types.StringValue(os.Getenv("GOOGLE_BILLING_PROJECT")) + } + + if data.Region.IsNull() || data.Region.IsUnknown() { + region := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", + }, nil) + + if region != nil { + data.Region = types.StringValue(region.(string)) + } + } + + if data.Zone.IsNull() || data.Zone.IsUnknown() { + zone := 
transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ZONE", + "GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE", + }, nil) + + if zone != nil { + data.Zone = types.StringValue(zone.(string)) + } + } + + if len(data.Scopes.Elements()) == 0 { + var d diag.Diagnostics + data.Scopes, d = types.ListValueFrom(ctx, types.StringType, transport_tpg.DefaultClientScopes) + diags.Append(d...) + if diags.HasError() { + return + } + } + + if !data.Batching.IsNull() && !data.Batching.IsUnknown() { + var pbConfigs []fwmodels.ProviderBatching + d := data.Batching.ElementsAs(ctx, &pbConfigs, true) + diags.Append(d...) + if diags.HasError() { + return + } + + if pbConfigs[0].SendAfter.IsNull() || pbConfigs[0].SendAfter.IsUnknown() { + pbConfigs[0].SendAfter = types.StringValue("10s") + } + + if pbConfigs[0].EnableBatching.IsNull() || pbConfigs[0].EnableBatching.IsUnknown() { + pbConfigs[0].EnableBatching = types.BoolValue(true) + } + + data.Batching, d = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(fwmodels.ProviderBatchingAttributes), pbConfigs) + } + + if (data.UserProjectOverride.IsNull() || data.UserProjectOverride.IsUnknown()) && os.Getenv("USER_PROJECT_OVERRIDE") != "" { + override, err := strconv.ParseBool(os.Getenv("USER_PROJECT_OVERRIDE")) + if err != nil { + diags.AddError( + "error parsing environment variable `USER_PROJECT_OVERRIDE` into bool", err.Error()) + } + data.UserProjectOverride = types.BoolValue(override) + } + + if (data.RequestReason.IsNull() || data.RequestReason.IsUnknown()) && os.Getenv("CLOUDSDK_CORE_REQUEST_REASON") != "" { + data.RequestReason = types.StringValue(os.Getenv("CLOUDSDK_CORE_REQUEST_REASON")) + } + + if data.RequestTimeout.IsNull() || data.RequestTimeout.IsUnknown() { + data.RequestTimeout = types.StringValue("120s") + } + + // Generated Products +{{- range $product := $.Products }} + if data.{{ $product.Name }}CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_{{ upper (underscore 
$product.Name) }}_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.{{ $product.Name }}BasePathKey]) + if customEndpoint != nil { + data.{{ $product.Name }}CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } +{{- end }} + + // Handwritten Products / Versioned / Atypical Entries + if data.CloudBillingCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BILLING_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths["cloud_billing_custom_endpoint"]) + if customEndpoint != nil { + data.CloudBillingCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ComposerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_COMPOSER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ComposerBasePathKey]) + if customEndpoint != nil { + data.ComposerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ContainerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ContainerBasePathKey]) + if customEndpoint != nil { + data.ContainerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.DataflowCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATAFLOW_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.DataflowBasePathKey]) + if customEndpoint != nil { + data.DataflowCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.IamCredentialsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAM_CREDENTIALS_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IamCredentialsBasePathKey]) + if customEndpoint != nil { + data.IamCredentialsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if 
data.ResourceManagerV3CustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_RESOURCE_MANAGER_V3_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ResourceManagerV3BasePathKey]) + if customEndpoint != nil { + data.ResourceManagerV3CustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if data.RuntimeConfigCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_RUNTIMECONFIG_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.RuntimeConfigBasePathKey]) + if customEndpoint != nil { + data.RuntimeConfigCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } +{{- end }} + + if data.IAMCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_IAM_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.IAMBasePathKey]) + if customEndpoint != nil { + data.IAMCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ServiceNetworkingCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_SERVICE_NETWORKING_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ServiceNetworkingBasePathKey]) + if customEndpoint != nil { + data.ServiceNetworkingCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.TagsLocationCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_TAGS_LOCATION_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.TagsLocationBasePathKey]) + if customEndpoint != nil { + data.TagsLocationCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + // dcl + if data.ContainerAwsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINERAWS_CUSTOM_ENDPOINT", + }, 
transport_tpg.DefaultBasePaths[transport_tpg.ContainerAwsBasePathKey]) + if customEndpoint != nil { + data.ContainerAwsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.ContainerAzureCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CONTAINERAZURE_CUSTOM_ENDPOINT", + }, transport_tpg.DefaultBasePaths[transport_tpg.ContainerAzureBasePathKey]) + if customEndpoint != nil { + data.ContainerAzureCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + // DCL generated defaults + if data.ApikeysCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_APIKEYS_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.ApikeysCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.AssuredWorkloadsCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_ASSURED_WORKLOADS_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.AssuredWorkloadsCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.CloudBuildWorkerPoolCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BUILD_WORKER_POOL_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.CloudBuildWorkerPoolCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.CloudResourceManagerCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_CLOUD_RESOURCE_MANAGER_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.CloudResourceManagerCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.DataplexCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_DATAPLEX_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.DataplexCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if 
data.EventarcCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_EVENTARC_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.EventarcCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.FirebaserulesCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_FIREBASERULES_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.FirebaserulesCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.NetworkConnectivityCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_NETWORK_CONNECTIVITY_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.NetworkConnectivityCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } + + if data.RecaptchaEnterpriseCustomEndpoint.IsNull() { + customEndpoint := transport_tpg.MultiEnvDefault([]string{ + "GOOGLE_RECAPTCHA_ENTERPRISE_CUSTOM_ENDPOINT", + }, "") + if customEndpoint != nil { + data.RecaptchaEnterpriseCustomEndpoint = types.StringValue(customEndpoint.(string)) + } + } +} + +func (p *FrameworkProviderConfig) SetupClient(ctx context.Context, data fwmodels.ProviderModel, diags *diag.Diagnostics) { + tokenSource := GetTokenSource(ctx, data, false, diags) + if diags.HasError() { + return + } + + cleanCtx := context.WithValue(ctx, oauth2.HTTPClient, cleanhttp.DefaultClient()) + + // 1. MTLS TRANSPORT/CLIENT - sets up proper auth headers + client, _, err := transport.NewHTTPClient(cleanCtx, option.WithTokenSource(tokenSource)) + if err != nil { + diags.AddError("error creating new http client", err.Error()) + return + } + + // Userinfo is fetched before request logging is enabled to reduce additional noise. + p.logGoogleIdentities(ctx, data, diags) + if diags.HasError() { + return + } + + // 2. Logging Transport - ensure we log HTTP requests to GCP APIs. 
+ loggingTransport := logging.NewTransport("Google", client.Transport) + + // 3. Retry Transport - retries common temporary errors + // Keep order for wrapping logging so we log each retried request as well. + // This value should be used if needed to create shallow copies with additional retry predicates. + // See ClientWithAdditionalRetries + retryTransport := transport_tpg.NewTransportWithDefaultRetries(loggingTransport) + + // 4. Header Transport - outer wrapper to inject additional headers we want to apply + // before making requests + headerTransport := transport_tpg.NewTransportWithHeaders(retryTransport) + if !data.RequestReason.IsNull() { + headerTransport.Set("X-Goog-Request-Reason", data.RequestReason.ValueString()) + } + + // Ensure $userProject is set for all HTTP requests using the client if specified by the provider config + // See https://cloud.google.com/apis/docs/system-parameters + if data.UserProjectOverride.ValueBool() && !data.BillingProject.IsNull() { + headerTransport.Set("X-Goog-User-Project", data.BillingProject.ValueString()) + } + + // Set final transport value. + client.Transport = headerTransport + + // This timeout is a timeout per HTTP request, not per logical operation. 
+ timeout, err := time.ParseDuration(data.RequestTimeout.ValueString()) + if err != nil { + diags.AddError("error parsing request timeout", err.Error()) + } + client.Timeout = timeout + + p.TokenSource = tokenSource + p.Client = client +} + +func (p *FrameworkProviderConfig) SetupGrpcLogging() { + logger := logrus.StandardLogger() + + logrus.SetLevel(logrus.DebugLevel) + logrus.SetFormatter(&transport_tpg.Formatter{ + TimestampFormat: "2006/01/02 15:04:05", + LogFormat: "%time% [%lvl%] %msg% \n", + }) + + alwaysLoggingDeciderClient := func(ctx context.Context, fullMethodName string) bool { return true } + grpc_logrus.ReplaceGrpcLogger(logrus.NewEntry(logger)) + + p.gRPCLoggingOptions = append( + p.gRPCLoggingOptions, option.WithGRPCDialOption(grpc.WithUnaryInterceptor( + grpc_logrus.PayloadUnaryClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + option.WithGRPCDialOption(grpc.WithStreamInterceptor( + grpc_logrus.PayloadStreamClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + ) +} + +func (p *FrameworkProviderConfig) logGoogleIdentities(ctx context.Context, data fwmodels.ProviderModel, diags *diag.Diagnostics) { + // GetCurrentUserEmailFramework doesn't pass an error back from logGoogleIdentities, so we want + // a separate diagnostics here + var d diag.Diagnostics + + if data.ImpersonateServiceAccount.IsNull() || data.ImpersonateServiceAccount.IsUnknown() { + + tokenSource := GetTokenSource(ctx, data, true, diags) + if diags.HasError() { + return + } + + p.Client = oauth2.NewClient(ctx, tokenSource) // p.Client isn't initialised fully when this code is called. + + email := GetCurrentUserEmailFramework(p, p.UserAgent, &d) + if d.HasError() { + tflog.Info(ctx, "error retrieving userinfo for your provider credentials. 
have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope?") + } + + tflog.Info(ctx, fmt.Sprintf("Terraform is using this identity: %s", email)) + return + } + + // Drop Impersonated ClientOption from OAuth2 TokenSource to infer original identity + tokenSource := GetTokenSource(ctx, data, true, diags) + if diags.HasError() { + return + } + + p.Client = oauth2.NewClient(ctx, tokenSource) // p.Client isn't initialised fully when this code is called. + email := GetCurrentUserEmailFramework(p, p.UserAgent, &d) + if d.HasError() { + tflog.Info(ctx, "error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope?") + } + + tflog.Info(ctx, fmt.Sprintf("Terraform is configured with service account impersonation, original identity: %s, impersonated identity: %s", email, data.ImpersonateServiceAccount.ValueString())) + + // Add the Impersonated ClientOption back in to the OAuth2 TokenSource + tokenSource = GetTokenSource(ctx, data, false, diags) + if diags.HasError() { + return + } + + p.Client = oauth2.NewClient(ctx, tokenSource) // p.Client isn't initialised fully when this code is called. + + return +} + +// Configuration helpers + +// GetTokenSource gets token source based on the Google Credentials configured. +// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds. +func GetTokenSource(ctx context.Context, data fwmodels.ProviderModel, initialCredentialsOnly bool, diags *diag.Diagnostics) oauth2.TokenSource { + creds := GetCredentials(ctx, data, initialCredentialsOnly, diags) + + return creds.TokenSource +} + +// GetCredentials gets credentials with a given scope (clientScopes). +// If initialCredentialsOnly is true, don't follow the impersonation +// settings and return the initial set of creds instead. 
+func GetCredentials(ctx context.Context, data fwmodels.ProviderModel, initialCredentialsOnly bool, diags *diag.Diagnostics) googleoauth.Credentials { + var clientScopes []string + var delegates []string + + if !data.Scopes.IsNull() && !data.Scopes.IsUnknown() { + d := data.Scopes.ElementsAs(ctx, &clientScopes, false) + diags.Append(d...) + if diags.HasError() { + return googleoauth.Credentials{} + } + } + + if !data.ImpersonateServiceAccountDelegates.IsNull() && !data.ImpersonateServiceAccountDelegates.IsUnknown() { + d := data.ImpersonateServiceAccountDelegates.ElementsAs(ctx, &delegates, false) + diags.Append(d...) + if diags.HasError() { + return googleoauth.Credentials{} + } + } + + if !data.AccessToken.IsNull() && !data.AccessToken.IsUnknown() { + contents, _, err := verify.PathOrContents(data.AccessToken.ValueString()) + if err != nil { + diags.AddError("error loading access token", err.Error()) + return googleoauth.Credentials{} + } + + token := &oauth2.Token{AccessToken: contents} + if !data.ImpersonateServiceAccount.IsNull() && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithTokenSource(oauth2.StaticTokenSource(token)), option.ImpersonateCredentials(data.ImpersonateServiceAccount.ValueString(), delegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) 
+ if err != nil { + diags.AddError("error impersonating credentials", err.Error()) + return googleoauth.Credentials{} + } + return *creds + } + + tflog.Info(ctx, "Authenticating using configured Google JSON 'access_token'...") + tflog.Info(ctx, fmt.Sprintf(" -- Scopes: %s", clientScopes)) + return googleoauth.Credentials{ + TokenSource: transport_tpg.StaticTokenSource{oauth2.StaticTokenSource(token)}, + } + } + + if !data.Credentials.IsNull() && !data.Credentials.IsUnknown() { + contents, _, err := verify.PathOrContents(data.Credentials.ValueString()) + if err != nil { + diags.AddError(fmt.Sprintf("error loading credentials: %s", err), err.Error()) + return googleoauth.Credentials{} + } + if len(contents) == 0 { + diags.AddError("error loading credentials", "provided credentials are empty") + return googleoauth.Credentials{} + } + + if !data.ImpersonateServiceAccount.IsNull() && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithCredentialsJSON([]byte(contents)), option.ImpersonateCredentials(data.ImpersonateServiceAccount.ValueString(), delegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) + if err != nil { + diags.AddError("error impersonating credentials", err.Error()) + return googleoauth.Credentials{} + } + return *creds + } + + creds, err := transport.Creds(ctx, option.WithCredentialsJSON([]byte(contents)), option.WithScopes(clientScopes...)) + if err != nil { + diags.AddError("unable to parse credentials", err.Error()) + return googleoauth.Credentials{} + } + + tflog.Info(ctx, "Authenticating using configured Google JSON 'credentials'...") + tflog.Info(ctx, fmt.Sprintf(" -- Scopes: %s", clientScopes)) + return *creds + } + + if !data.ImpersonateServiceAccount.IsNull() && !initialCredentialsOnly { + opts := option.ImpersonateCredentials(data.ImpersonateServiceAccount.ValueString(), delegates...) 
+ creds, err := transport.Creds(context.TODO(), opts, option.WithScopes(clientScopes...)) + if err != nil { + diags.AddError("error impersonating credentials", err.Error()) + return googleoauth.Credentials{} + } + + return *creds + } + + tflog.Info(ctx, "Authenticating using DefaultClient...") + tflog.Info(ctx, fmt.Sprintf(" -- Scopes: %s", clientScopes)) + creds, err := transport.Creds(context.Background(), option.WithScopes(clientScopes...)) + if err != nil { + diags.AddError(fmt.Sprintf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. "+ + "No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'"), err.Error()) + return googleoauth.Credentials{} + } + + return *creds +} + +// GetBatchingConfig returns the batching config object given the +// provider configuration set for batching +func GetBatchingConfig(ctx context.Context, data types.List, diags *diag.Diagnostics) *transport_tpg.BatchingConfig { + bc := &transport_tpg.BatchingConfig{ + SendAfter: time.Second * transport_tpg.DefaultBatchSendIntervalSec, + EnableBatching: true, + } + + // Handle if entire batching block is null/unknown + if data.IsNull() || data.IsUnknown() { + return bc + } + + var pbConfigs []fwmodels.ProviderBatching + d := data.ElementsAs(ctx, &pbConfigs, true) + diags.Append(d...) 
+ if diags.HasError() { + return bc + } + + sendAfter, err := time.ParseDuration(pbConfigs[0].SendAfter.ValueString()) + if err != nil { + diags.AddError("error parsing send after time duration", err.Error()) + return bc + } + + bc.SendAfter = sendAfter + + if !pbConfigs[0].EnableBatching.IsNull() { + bc.EnableBatching = pbConfigs[0].EnableBatching.ValueBool() + } + + return bc +} + +func GetRegionFromRegionSelfLink(selfLink basetypes.StringValue) basetypes.StringValue { + re := regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/[a-zA-Z0-9-]*/regions/([a-zA-Z0-9-]*)") + value := selfLink.String() + switch { + case re.MatchString(value): + if res := re.FindStringSubmatch(value); len(res) == 2 && res[1] != "" { + region := res[1] + return types.StringValue(region) + } + } + return selfLink +} diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index 679ce956db06..49c94c518fcb 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -118,4 +118,4 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect -) \ No newline at end of file +) diff --git a/mmv1/third_party/terraform/go/go.mod b/mmv1/third_party/terraform/go/go.mod index 3d389c3577ad..57b44d578a12 100644 --- a/mmv1/third_party/terraform/go/go.mod +++ b/mmv1/third_party/terraform/go/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.30.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 @@ -117,4 +117,4 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect -) \ No newline 
at end of file +) diff --git a/mmv1/third_party/terraform/provider/go/provider.go.tmpl b/mmv1/third_party/terraform/provider/go/provider.go.tmpl new file mode 100644 index 000000000000..bece351a6b7f --- /dev/null +++ b/mmv1/third_party/terraform/provider/go/provider.go.tmpl @@ -0,0 +1,416 @@ +package provider + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/version" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// Provider returns a *schema.Provider. +func Provider() *schema.Provider { + + // The mtls service client gives the type of endpoint (mtls/regular) + // at client creation. Since we use a shared client for requests we must + // rewrite the endpoints to be mtls endpoints for the scenario where + // mtls is enabled. 
+ if isMtls() { + // if mtls is enabled switch all default endpoints to use the mtls endpoint + for key, bp := range transport_tpg.DefaultBasePaths { + transport_tpg.DefaultBasePaths[key] = getMtlsEndpoint(bp) + } + } + + provider := &schema.Provider{ + Schema: map[string]*schema.Schema{ + "credentials": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCredentials, + ConflictsWith: []string{"access_token"}, + }, + + "access_token": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateEmptyStrings, + ConflictsWith: []string{"credentials"}, + }, + + "impersonate_service_account": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateEmptyStrings, + }, + + "impersonate_service_account_delegates": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateEmptyStrings, + }, + + "billing_project": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateEmptyStrings, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateEmptyStrings, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateEmptyStrings, + }, + + "scopes": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "universe_domain": { + Type: schema.TypeString, + Optional: true, + }, + + "batching": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "send_after": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateNonNegativeDuration(), + }, + "enable_batching": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + + "user_project_override": { + Type: schema.TypeBool, + Optional: true, + }, + + "request_timeout": { + Type: schema.TypeString, + Optional: true, + }, + + "request_reason": { + 
Type: schema.TypeString, + Optional: true, + }, + + "default_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "add_terraform_attribution_label": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "terraform_attribution_label_addition_strategy": { + Type: schema.TypeString, + Optional: true, + }, + + // Generated Products + {{- range $product := $.Products }} + "{{ underscore $product.Name }}_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: transport_tpg.ValidateCustomEndpoint, + }, + {{- end }} + + // Handwritten Products / Versioned / Atypical Entries + transport_tpg.CloudBillingCustomEndpointEntryKey: transport_tpg.CloudBillingCustomEndpointEntry, + transport_tpg.ComposerCustomEndpointEntryKey: transport_tpg.ComposerCustomEndpointEntry, + transport_tpg.ContainerCustomEndpointEntryKey: transport_tpg.ContainerCustomEndpointEntry, + transport_tpg.DataflowCustomEndpointEntryKey: transport_tpg.DataflowCustomEndpointEntry, + transport_tpg.IamCredentialsCustomEndpointEntryKey: transport_tpg.IamCredentialsCustomEndpointEntry, + transport_tpg.ResourceManagerV3CustomEndpointEntryKey: transport_tpg.ResourceManagerV3CustomEndpointEntry, + {{- if ne $.TargetVersionName "ga" }} + transport_tpg.RuntimeConfigCustomEndpointEntryKey: transport_tpg.RuntimeConfigCustomEndpointEntry, + {{- end }} + transport_tpg.IAMCustomEndpointEntryKey: transport_tpg.IAMCustomEndpointEntry, + transport_tpg.ServiceNetworkingCustomEndpointEntryKey: transport_tpg.ServiceNetworkingCustomEndpointEntry, + transport_tpg.TagsLocationCustomEndpointEntryKey: transport_tpg.TagsLocationCustomEndpointEntry, + + // dcl + transport_tpg.ContainerAwsCustomEndpointEntryKey: transport_tpg.ContainerAwsCustomEndpointEntry, + transport_tpg.ContainerAzureCustomEndpointEntryKey: transport_tpg.ContainerAzureCustomEndpointEntry, + }, + + ProviderMetaSchema: map[string]*schema.Schema{ + "module_name": { + Type: 
schema.TypeString, + Optional: true, + }, + }, + + DataSourcesMap: DatasourceMap(), + ResourcesMap: ResourceMap(), + } + + provider.ConfigureContextFunc = func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { + return ProviderConfigure(ctx, d, provider) + } + + transport_tpg.ConfigureDCLProvider(provider) + + return provider +} + +func DatasourceMap() map[string]*schema.Resource { + datasourceMap, _ := DatasourceMapWithErrors() + return datasourceMap +} + +func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { + return mergeResourceMaps( + handwrittenDatasources, + generatedIAMDatasources, + handwrittenIAMDatasources, + ) +} + +func ResourceMap() map[string]*schema.Resource { + resourceMap, _ := ResourceMapWithErrors() + return resourceMap +} + +func ResourceMapWithErrors() (map[string]*schema.Resource, error) { + return mergeResourceMaps( + generatedResources, + handwrittenResources, + handwrittenIAMResources, + dclResources, + ) +} + +func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Provider) (interface{}, diag.Diagnostics) { + err := transport_tpg.HandleSDKDefaults(d) + if err != nil { + return nil, diag.FromErr(err) + } + + config := transport_tpg.Config{ + Project: d.Get("project").(string), + Region: d.Get("region").(string), + Zone: d.Get("zone").(string), + UserProjectOverride: d.Get("user_project_override").(bool), + BillingProject: d.Get("billing_project").(string), +{{- if or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga") }} + UserAgent: p.UserAgent("terraform-provider-google", version.ProviderVersion), +{{- else }} + UserAgent: p.UserAgent("terraform-provider-google-{{ $.TargetVersionName }}", version.ProviderVersion), +{{- end }} + } + + // opt in extension for adding to the User-Agent header + if ext := os.Getenv("GOOGLE_TERRAFORM_USERAGENT_EXTENSION"); ext != "" { + ua := config.UserAgent + config.UserAgent = fmt.Sprintf("%s %s", ua, ext) + } + + if v, ok := 
d.GetOk("request_timeout"); ok { + var err error + config.RequestTimeout, err = time.ParseDuration(v.(string)) + if err != nil { + return nil, diag.FromErr(err) + } + } + + if v, ok := d.GetOk("request_reason"); ok { + config.RequestReason = v.(string) + } + + // Check for primary credentials in config. Note that if neither is set, ADCs + // will be used if available. + if v, ok := d.GetOk("access_token"); ok { + config.AccessToken = v.(string) + } + + if v, ok := d.GetOk("credentials"); ok { + config.Credentials = v.(string) + } + + // only check environment variables if neither value was set in config- this + // means config beats env var in all cases. + if config.AccessToken == "" && config.Credentials == "" { + config.Credentials = transport_tpg.MultiEnvSearch([]string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + }) + + config.AccessToken = transport_tpg.MultiEnvSearch([]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN", + }) + } + + // Set the universe domain to the configured value, if any + if v, ok := d.GetOk("universe_domain"); ok { + config.UniverseDomain = v.(string) + } + + // Configure DCL basePath + transport_tpg.ProviderDCLConfigure(d, &config) + + // Replace hostname by the universe_domain field. + if config.UniverseDomain != "" && config.UniverseDomain != "googleapis.com" { + for key, basePath := range transport_tpg.DefaultBasePaths { + transport_tpg.DefaultBasePaths[key] = strings.ReplaceAll(basePath, "googleapis.com", config.UniverseDomain) + } + } + + err = transport_tpg.SetEndpointDefaults(d) + if err != nil { + return nil, diag.FromErr(err) + } + transport_tpg.HandleDCLCustomEndpointDefaults(d) + + // Given that impersonate_service_account is a secondary auth method, it has + // no conflicts to worry about. We pull the env var in a DefaultFunc. 
+ if v, ok := d.GetOk("impersonate_service_account"); ok { + config.ImpersonateServiceAccount = v.(string) + } + + delegates := d.Get("impersonate_service_account_delegates").([]interface{}) + if len(delegates) > 0 { + config.ImpersonateServiceAccountDelegates = make([]string, len(delegates)) + } + for i, delegate := range delegates { + config.ImpersonateServiceAccountDelegates[i] = delegate.(string) + } + + scopes := d.Get("scopes").([]interface{}) + if len(scopes) > 0 { + config.Scopes = make([]string, len(scopes)) + } + for i, scope := range scopes { + config.Scopes[i] = scope.(string) + } + + config.DefaultLabels = make(map[string]string) + defaultLabels := d.Get("default_labels").(map[string]interface{}) + + for k, v := range defaultLabels { + config.DefaultLabels[k] = v.(string) + } + + config.AddTerraformAttributionLabel = d.Get("add_terraform_attribution_label").(bool) + if config.AddTerraformAttributionLabel { + config.TerraformAttributionLabelAdditionStrategy = transport_tpg.CreateOnlyAttributionStrategy + if v, ok := d.GetOk("terraform_attribution_label_addition_strategy"); ok { + config.TerraformAttributionLabelAdditionStrategy = v.(string) + } + switch config.TerraformAttributionLabelAdditionStrategy { + case transport_tpg.CreateOnlyAttributionStrategy, transport_tpg.ProactiveAttributionStrategy: + default: + return nil, diag.FromErr(fmt.Errorf("unrecognized terraform_attribution_label_addition_strategy %q", config.TerraformAttributionLabelAdditionStrategy)) + } + } + + batchCfg, err := transport_tpg.ExpandProviderBatchingConfig(d.Get("batching")) + if err != nil { + return nil, diag.FromErr(err) + } + config.BatchingConfig = batchCfg + + // Generated products + {{- range $product := $.Products }} + config.{{ $product.Name }}BasePath = d.Get("{{ underscore $product.Name }}_custom_endpoint").(string) + {{- end }} + + // Handwritten Products / Versioned / Atypical Entries + config.CloudBillingBasePath = 
d.Get(transport_tpg.CloudBillingCustomEndpointEntryKey).(string) + config.ComposerBasePath = d.Get(transport_tpg.ComposerCustomEndpointEntryKey).(string) + config.ContainerBasePath = d.Get(transport_tpg.ContainerCustomEndpointEntryKey).(string) + config.DataflowBasePath = d.Get(transport_tpg.DataflowCustomEndpointEntryKey).(string) + config.IamCredentialsBasePath = d.Get(transport_tpg.IamCredentialsCustomEndpointEntryKey).(string) + config.ResourceManagerV3BasePath = d.Get(transport_tpg.ResourceManagerV3CustomEndpointEntryKey).(string) + {{- if ne $.TargetVersionName "ga" }} + config.RuntimeConfigBasePath = d.Get(transport_tpg.RuntimeConfigCustomEndpointEntryKey).(string) + {{- end }} + config.IAMBasePath = d.Get(transport_tpg.IAMCustomEndpointEntryKey).(string) + config.ServiceUsageBasePath = d.Get(transport_tpg.ServiceUsageCustomEndpointEntryKey).(string) + config.BigtableAdminBasePath = d.Get(transport_tpg.BigtableAdminCustomEndpointEntryKey).(string) + config.TagsLocationBasePath = d.Get(transport_tpg.TagsLocationCustomEndpointEntryKey).(string) + + // dcl + config.ContainerAwsBasePath = d.Get(transport_tpg.ContainerAwsCustomEndpointEntryKey).(string) + config.ContainerAzureBasePath = d.Get(transport_tpg.ContainerAzureCustomEndpointEntryKey).(string) + + stopCtx, ok := schema.StopContext(ctx) + if !ok { + stopCtx = ctx + } + if err := config.LoadAndValidate(stopCtx); err != nil { + return nil, diag.FromErr(err) + } + + // Verify that universe domains match between credentials and configuration + if v, ok := d.GetOk("universe_domain"); ok { + if config.UniverseDomain == "" && v.(string) != "googleapis.com" { // v can't be "", as it wouldn't pass `ok` above + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' supplied directly to Terraform with no matching universe domain in credentials. 
Credentials with no 'universe_domain' set are assumed to be in the default universe.", v)) + } else if v.(string) != config.UniverseDomain && !(config.UniverseDomain == "" && v.(string) == "googleapis.com") { + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' does not match the universe domain '%s' supplied directly to Terraform. The 'universe_domain' provider configuration must match the universe domain supplied by credentials.", config.UniverseDomain, v)) + } + } else if config.UniverseDomain != "" && config.UniverseDomain != "googleapis.com" { + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: Universe domain '%s' was found in credentials without a corresponding 'universe_domain' provider configuration set. Please set 'universe_domain' to '%s' or use different credentials.", config.UniverseDomain, config.UniverseDomain)) + } + + return &config, nil +} + +func mergeResourceMaps(ms ...map[string]*schema.Resource) (map[string]*schema.Resource, error) { + merged := make(map[string]*schema.Resource) + duplicates := []string{} + + for _, m := range ms { + for k, v := range m { + if _, ok := merged[k]; ok { + duplicates = append(duplicates, k) + } + + merged[k] = v + } + } + + var err error + if len(duplicates) > 0 { + err = fmt.Errorf("saw duplicates in mergeResourceMaps: %v", duplicates) + } + + return merged, err +} diff --git a/mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl b/mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl new file mode 100644 index 000000000000..c7dc42e4882f --- /dev/null +++ b/mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl @@ -0,0 +1,457 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + {{- range $service := $.GetMmv1ServicesInVersion $.Products }} + "github.com/hashicorp/terraform-provider-google/google/services/{{ $service }}" + {{- end }} + + {{ if eq $.TargetVersionName `ga` }} + 
"github.com/hashicorp/terraform-provider-google/google/services/composer" + "github.com/hashicorp/terraform-provider-google/google/services/siteverification" + {{- end }} + "github.com/hashicorp/terraform-provider-google/google/services/container" + "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + "github.com/hashicorp/terraform-provider-google/google/services/dataflow" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" +) + +// Datasources +var handwrittenDatasources = map[string]*schema.Resource{ + // ####### START handwritten datasources ########### + "google_access_approval_folder_service_account": accessapproval.DataSourceAccessApprovalFolderServiceAccount(), + "google_access_approval_organization_service_account": accessapproval.DataSourceAccessApprovalOrganizationServiceAccount(), + "google_access_approval_project_service_account": accessapproval.DataSourceAccessApprovalProjectServiceAccount(), + "google_active_folder": resourcemanager.DataSourceGoogleActiveFolder(), + "google_alloydb_locations": alloydb.DataSourceAlloydbLocations(), + "google_alloydb_supported_database_flags": alloydb.DataSourceAlloydbSupportedDatabaseFlags(), + "google_artifact_registry_docker_image": artifactregistry.DataSourceArtifactRegistryDockerImage(), + "google_artifact_registry_locations": artifactregistry.DataSourceGoogleArtifactRegistryLocations(), + "google_artifact_registry_repository": artifactregistry.DataSourceArtifactRegistryRepository(), + "google_apphub_discovered_workload": apphub.DataSourceApphubDiscoveredWorkload(), + "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), + "google_apphub_application": apphub.DataSourceGoogleApphubApplication(), + "google_apphub_discovered_service": apphub.DataSourceApphubDiscoveredService(), + {{- if ne $.TargetVersionName "ga" }} + 
"google_backup_dr_management_server": backupdr.DataSourceGoogleCloudBackupDRService(), + {{- end }} + "google_beyondcorp_app_connection": beyondcorp.DataSourceGoogleBeyondcorpAppConnection(), + "google_beyondcorp_app_connector": beyondcorp.DataSourceGoogleBeyondcorpAppConnector(), + "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), + "google_billing_account": billing.DataSourceGoogleBillingAccount(), + "google_bigquery_dataset": bigquery.DataSourceGoogleBigqueryDataset(), + "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), + "google_certificate_manager_certificates": certificatemanager.DataSourceGoogleCertificateManagerCertificates(), + "google_certificate_manager_certificate_map": certificatemanager.DataSourceGoogleCertificateManagerCertificateMap(), + "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), + "google_cloudfunctions_function": cloudfunctions.DataSourceGoogleCloudFunctionsFunction(), + "google_cloudfunctions2_function": cloudfunctions2.DataSourceGoogleCloudFunctions2Function(), + {{- if ne $.TargetVersionName "ga" }} + "google_cloud_asset_resources_search_all": cloudasset.DataSourceGoogleCloudAssetResourcesSearchAll(), + {{- end }} + "google_cloud_asset_search_all_resources": cloudasset.DataSourceGoogleCloudAssetSearchAllResources(), + "google_cloud_identity_groups": cloudidentity.DataSourceGoogleCloudIdentityGroups(), + "google_cloud_identity_group_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupMemberships(), + "google_cloud_identity_group_transitive_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupTransitiveMemberships(), + "google_cloud_identity_group_lookup": cloudidentity.DataSourceGoogleCloudIdentityGroupLookup(), + "google_cloud_quotas_quota_info": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfo(), + "google_cloud_quotas_quota_infos": cloudquotas.DataSourceGoogleCloudQuotasQuotaInfos(), + 
"google_cloud_run_locations": cloudrun.DataSourceGoogleCloudRunLocations(), + "google_cloud_run_service": cloudrun.DataSourceGoogleCloudRunService(), + "google_cloud_run_v2_job": cloudrunv2.DataSourceGoogleCloudRunV2Job(), + "google_cloud_run_v2_service": cloudrunv2.DataSourceGoogleCloudRunV2Service(), + "google_composer_environment": composer.DataSourceGoogleComposerEnvironment(), + {{- if ne $.TargetVersionName "ga" }} + "google_composer_user_workloads_config_map": composer.DataSourceGoogleComposerUserWorkloadsConfigMap(), + "google_composer_user_workloads_secret": composer.DataSourceGoogleComposerUserWorkloadsSecret(), + {{- end }} + "google_composer_image_versions": composer.DataSourceGoogleComposerImageVersions(), + "google_compute_address": compute.DataSourceGoogleComputeAddress(), + "google_compute_addresses": compute.DataSourceGoogleComputeAddresses(), + "google_compute_backend_service": compute.DataSourceGoogleComputeBackendService(), + "google_compute_backend_bucket": compute.DataSourceGoogleComputeBackendBucket(), + "google_compute_default_service_account": compute.DataSourceGoogleComputeDefaultServiceAccount(), + "google_compute_disk": compute.DataSourceGoogleComputeDisk(), + "google_compute_forwarding_rule": compute.DataSourceGoogleComputeForwardingRule(), + "google_compute_forwarding_rules": compute.DataSourceGoogleComputeForwardingRules(), + "google_compute_global_address": compute.DataSourceGoogleComputeGlobalAddress(), + "google_compute_global_forwarding_rule": compute.DataSourceGoogleComputeGlobalForwardingRule(), + "google_compute_ha_vpn_gateway": compute.DataSourceGoogleComputeHaVpnGateway(), + "google_compute_health_check": compute.DataSourceGoogleComputeHealthCheck(), + "google_compute_image": compute.DataSourceGoogleComputeImage(), + "google_compute_instance": compute.DataSourceGoogleComputeInstance(), + "google_compute_instance_group": compute.DataSourceGoogleComputeInstanceGroup(), + "google_compute_instance_group_manager": 
compute.DataSourceGoogleComputeInstanceGroupManager(), + "google_compute_instance_serial_port": compute.DataSourceGoogleComputeInstanceSerialPort(), + "google_compute_instance_template": compute.DataSourceGoogleComputeInstanceTemplate(), + "google_compute_lb_ip_ranges": compute.DataSourceGoogleComputeLbIpRanges(), + "google_compute_machine_types": compute.DataSourceGoogleComputeMachineTypes(), + "google_compute_network": compute.DataSourceGoogleComputeNetwork(), + "google_compute_networks": compute.DataSourceGoogleComputeNetworks(), + "google_compute_network_endpoint_group": compute.DataSourceGoogleComputeNetworkEndpointGroup(), + "google_compute_network_peering": compute.DataSourceComputeNetworkPeering(), + "google_compute_node_types": compute.DataSourceGoogleComputeNodeTypes(), + "google_compute_regions": compute.DataSourceGoogleComputeRegions(), + "google_compute_region_disk": compute.DataSourceGoogleComputeRegionDisk(), + "google_compute_region_instance_group": compute.DataSourceGoogleComputeRegionInstanceGroup(), + "google_compute_region_instance_template": compute.DataSourceGoogleComputeRegionInstanceTemplate(), + "google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), + "google_compute_region_ssl_certificate": compute.DataSourceGoogleRegionComputeSslCertificate(), + "google_compute_reservation": compute.DataSourceGoogleComputeReservation(), + "google_compute_resource_policy": compute.DataSourceGoogleComputeResourcePolicy(), + "google_compute_router": compute.DataSourceGoogleComputeRouter(), + "google_compute_router_nat": compute.DataSourceGoogleComputeRouterNat(), + "google_compute_router_status": compute.DataSourceGoogleComputeRouterStatus(), + "google_compute_security_policy": compute.DataSourceGoogleComputeSecurityPolicy(), + "google_compute_snapshot": compute.DataSourceGoogleComputeSnapshot(), + "google_compute_ssl_certificate": compute.DataSourceGoogleComputeSslCertificate(), + 
"google_compute_ssl_policy": compute.DataSourceGoogleComputeSslPolicy(), + "google_compute_subnetwork": compute.DataSourceGoogleComputeSubnetwork(), + "google_compute_subnetworks": compute.DataSourceGoogleComputeSubnetworks(), + "google_compute_vpn_gateway": compute.DataSourceGoogleComputeVpnGateway(), + "google_compute_zones": compute.DataSourceGoogleComputeZones(), + "google_container_azure_versions": containerazure.DataSourceGoogleContainerAzureVersions(), + "google_container_aws_versions": containeraws.DataSourceGoogleContainerAwsVersions(), + "google_container_attached_versions": containerattached.DataSourceGoogleContainerAttachedVersions(), + "google_container_attached_install_manifest": containerattached.DataSourceGoogleContainerAttachedInstallManifest(), + "google_container_cluster": container.DataSourceGoogleContainerCluster(), + "google_container_engine_versions": container.DataSourceGoogleContainerEngineVersions(), + "google_container_registry_image": containeranalysis.DataSourceGoogleContainerImage(), + "google_container_registry_repository": containeranalysis.DataSourceGoogleContainerRepo(), + "google_dataproc_metastore_service": dataprocmetastore.DataSourceDataprocMetastoreService(), + "google_datastream_static_ips": datastream.DataSourceGoogleDatastreamStaticIps(), + "google_dns_keys": dns.DataSourceDNSKeys(), + "google_dns_managed_zone": dns.DataSourceDnsManagedZone(), + "google_dns_managed_zones": dns.DataSourceDnsManagedZones(), + "google_dns_record_set": dns.DataSourceDnsRecordSet(), + "google_gke_hub_membership_binding": gkehub2.DataSourceGoogleGkeHubMembershipBinding(), + "google_filestore_instance": filestore.DataSourceGoogleFilestoreInstance(), + "google_iam_policy": resourcemanager.DataSourceGoogleIamPolicy(), + "google_iam_role": resourcemanager.DataSourceGoogleIamRole(), + "google_iam_testable_permissions": resourcemanager.DataSourceGoogleIamTestablePermissions(), + {{- if ne $.TargetVersionName "ga" }} + 
"google_iam_workload_identity_pool": iambeta.DataSourceIAMBetaWorkloadIdentityPool(), + "google_iam_workload_identity_pool_provider": iambeta.DataSourceIAMBetaWorkloadIdentityPoolProvider(), + {{- end }} + "google_iap_client": iap.DataSourceGoogleIapClient(), + "google_kms_crypto_key": kms.DataSourceGoogleKmsCryptoKey(), + "google_kms_crypto_keys": kms.DataSourceGoogleKmsCryptoKeys(), + "google_kms_crypto_key_version": kms.DataSourceGoogleKmsCryptoKeyVersion(), + "google_kms_crypto_key_latest_version": kms.DataSourceGoogleKmsLatestCryptoKeyVersion(), + "google_kms_crypto_key_versions": kms.DataSourceGoogleKmsCryptoKeyVersions(), + "google_kms_key_ring": kms.DataSourceGoogleKmsKeyRing(), + "google_kms_key_rings": kms.DataSourceGoogleKmsKeyRings(), + "google_kms_secret": kms.DataSourceGoogleKmsSecret(), + "google_kms_secret_ciphertext": kms.DataSourceGoogleKmsSecretCiphertext(), + {{- if ne $.TargetVersionName "ga" }} + "google_kms_secret_asymmetric": kms.DataSourceGoogleKmsSecretAsymmetric(), + "google_firebase_android_app": firebase.DataSourceGoogleFirebaseAndroidApp(), + "google_firebase_apple_app": firebase.DataSourceGoogleFirebaseAppleApp(), + "google_firebase_hosting_channel": firebasehosting.DataSourceGoogleFirebaseHostingChannel(), + "google_firebase_web_app": firebase.DataSourceGoogleFirebaseWebApp(), + {{- end }} + "google_folder": resourcemanager.DataSourceGoogleFolder(), + "google_folders": resourcemanager.DataSourceGoogleFolders(), + "google_folder_organization_policy": resourcemanager.DataSourceGoogleFolderOrganizationPolicy(), + "google_logging_folder_settings": logging.DataSourceGoogleLoggingFolderSettings(), + "google_logging_organization_settings": logging.DataSourceGoogleLoggingOrganizationSettings(), + "google_logging_project_cmek_settings": logging.DataSourceGoogleLoggingProjectCmekSettings(), + "google_logging_project_settings": logging.DataSourceGoogleLoggingProjectSettings(), + "google_logging_sink": logging.DataSourceGoogleLoggingSink(), + 
"google_monitoring_notification_channel": monitoring.DataSourceMonitoringNotificationChannel(), + "google_monitoring_cluster_istio_service": monitoring.DataSourceMonitoringServiceClusterIstio(), + "google_monitoring_istio_canonical_service": monitoring.DataSourceMonitoringIstioCanonicalService(), + "google_monitoring_mesh_istio_service": monitoring.DataSourceMonitoringServiceMeshIstio(), + "google_monitoring_app_engine_service": monitoring.DataSourceMonitoringServiceAppEngine(), + "google_monitoring_uptime_check_ips": monitoring.DataSourceGoogleMonitoringUptimeCheckIps(), + "google_netblock_ip_ranges": resourcemanager.DataSourceGoogleNetblockIpRanges(), + "google_organization": resourcemanager.DataSourceGoogleOrganization(), + "google_privateca_certificate_authority": privateca.DataSourcePrivatecaCertificateAuthority(), + "google_project": resourcemanager.DataSourceGoogleProject(), + "google_projects": resourcemanager.DataSourceGoogleProjects(), + "google_project_organization_policy": resourcemanager.DataSourceGoogleProjectOrganizationPolicy(), + "google_project_service": resourcemanager.DataSourceGoogleProjectService(), + "google_pubsub_subscription": pubsub.DataSourceGooglePubsubSubscription(), + "google_pubsub_topic": pubsub.DataSourceGooglePubsubTopic(), + {{- if ne $.TargetVersionName "ga" }} + "google_runtimeconfig_config": runtimeconfig.DataSourceGoogleRuntimeconfigConfig(), + "google_runtimeconfig_variable": runtimeconfig.DataSourceGoogleRuntimeconfigVariable(), + {{- end }} + "google_secret_manager_secret": secretmanager.DataSourceSecretManagerSecret(), + "google_secret_manager_secrets": secretmanager.DataSourceSecretManagerSecrets(), + "google_secret_manager_secret_version": secretmanager.DataSourceSecretManagerSecretVersion(), + "google_secret_manager_secret_version_access": secretmanager.DataSourceSecretManagerSecretVersionAccess(), + "google_service_account": resourcemanager.DataSourceGoogleServiceAccount(), + "google_service_account_access_token": 
resourcemanager.DataSourceGoogleServiceAccountAccessToken(), + "google_service_account_id_token": resourcemanager.DataSourceGoogleServiceAccountIdToken(), + "google_service_account_jwt": resourcemanager.DataSourceGoogleServiceAccountJwt(), + "google_service_account_key": resourcemanager.DataSourceGoogleServiceAccountKey(), + "google_site_verification_token": siteverification.DataSourceSiteVerificationToken(), + "google_sourcerepo_repository": sourcerepo.DataSourceGoogleSourceRepoRepository(), + "google_spanner_instance": spanner.DataSourceSpannerInstance(), + "google_sql_ca_certs": sql.DataSourceGoogleSQLCaCerts(), + "google_sql_tiers": sql.DataSourceGoogleSQLTiers(), + "google_sql_database_instance_latest_recovery_time": sql.DataSourceSqlDatabaseInstanceLatestRecoveryTime(), + "google_sql_backup_run": sql.DataSourceSqlBackupRun(), + "google_sql_databases": sql.DataSourceSqlDatabases(), + "google_sql_database": sql.DataSourceSqlDatabase(), + "google_sql_database_instance": sql.DataSourceSqlDatabaseInstance(), + "google_sql_database_instances": sql.DataSourceSqlDatabaseInstances(), + "google_service_networking_peered_dns_domain": servicenetworking.DataSourceGoogleServiceNetworkingPeeredDNSDomain(), + "google_storage_bucket": storage.DataSourceGoogleStorageBucket(), + "google_storage_buckets": storage.DataSourceGoogleStorageBuckets(), + "google_storage_bucket_object": storage.DataSourceGoogleStorageBucketObject(), + "google_storage_bucket_objects": storage.DataSourceGoogleStorageBucketObjects(), + "google_storage_bucket_object_content": storage.DataSourceGoogleStorageBucketObjectContent(), + "google_storage_object_signed_url": storage.DataSourceGoogleSignedUrl(), + "google_storage_project_service_account": storage.DataSourceGoogleStorageProjectServiceAccount(), + "google_storage_transfer_project_service_account": storagetransfer.DataSourceGoogleStorageTransferProjectServiceAccount(), + "google_tags_tag_key": tags.DataSourceGoogleTagsTagKey(), + 
"google_tags_tag_keys": tags.DataSourceGoogleTagsTagKeys(), + "google_tags_tag_value": tags.DataSourceGoogleTagsTagValue(), + "google_tags_tag_values": tags.DataSourceGoogleTagsTagValues(), + "google_tpu_tensorflow_versions": tpu.DataSourceTpuTensorflowVersions(), + {{- if ne $.TargetVersionName "ga" }} + "google_tpu_v2_runtime_versions": tpuv2.DataSourceTpuV2RuntimeVersions(), + "google_tpu_v2_accelerator_types": tpuv2.DataSourceTpuV2AcceleratorTypes(), + {{- end }} + "google_vpc_access_connector": vpcaccess.DataSourceVPCAccessConnector(), + "google_redis_instance": redis.DataSourceGoogleRedisInstance(), + "google_vertex_ai_index": vertexai.DataSourceVertexAIIndex(), + "google_vmwareengine_cluster": vmwareengine.DataSourceVmwareengineCluster(), + "google_vmwareengine_external_access_rule": vmwareengine.DataSourceVmwareengineExternalAccessRule(), + "google_vmwareengine_external_address": vmwareengine.DataSourceVmwareengineExternalAddress(), + "google_vmwareengine_network": vmwareengine.DataSourceVmwareengineNetwork(), + "google_vmwareengine_network_peering": vmwareengine.DataSourceVmwareengineNetworkPeering(), + "google_vmwareengine_network_policy": vmwareengine.DataSourceVmwareengineNetworkPolicy(), + "google_vmwareengine_nsx_credentials": vmwareengine.DataSourceVmwareengineNsxCredentials(), + "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), + "google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), + "google_vmwareengine_vcenter_credentials": vmwareengine.DataSourceVmwareengineVcenterCredentials(), + + // ####### END handwritten datasources ########### +} + +var generatedIAMDatasources = map[string]*schema.Resource{ + // ####### START generated IAM datasources ########### + {{- range $object := $.ResourcesForVersion }} + {{- if $object.IamClassName }} + "{{ $object.TerraformName }}_iam_policy": tpgiamresource.DataSourceIamPolicy({{ $object.IamClassName }}IamSchema, {{ $object.IamClassName 
}}IamUpdaterProducer), + {{- end }} + {{- end }} + // ####### END generated IAM datasources ########### +} + +var handwrittenIAMDatasources = map[string]*schema.Resource{ + // ####### START non-generated IAM datasources ########### + "google_bigtable_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater), + "google_bigtable_table_iam_policy": tpgiamresource.DataSourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater), + "google_bigquery_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater), + "google_billing_account_iam_policy": tpgiamresource.DataSourceIamPolicy(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater), + "google_dataproc_cluster_iam_policy": tpgiamresource.DataSourceIamPolicy(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater), + "google_dataproc_job_iam_policy": tpgiamresource.DataSourceIamPolicy(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater), + "google_folder_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater), + "google_healthcare_dataset_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater), + "google_healthcare_dicom_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater), + "google_healthcare_fhir_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater), + "google_healthcare_hl7_v2_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater), + "google_kms_key_ring_iam_policy": tpgiamresource.DataSourceIamPolicy(kms.IamKmsKeyRingSchema, 
kms.NewKmsKeyRingIamUpdater), + "google_kms_crypto_key_iam_policy": tpgiamresource.DataSourceIamPolicy(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater), + "google_spanner_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater), + "google_spanner_database_iam_policy": tpgiamresource.DataSourceIamPolicy(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater), + "google_storage_managed_folder_iam_policy": tpgiamresource.DataSourceIamPolicy(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer), + "google_organization_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater), + "google_project_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater), + "google_pubsub_subscription_iam_policy": tpgiamresource.DataSourceIamPolicy(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater), + "google_service_account_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater), + // ####### END non-generated IAM datasources ########### +} + +// Resources +// Generated resources: {{ $.ResourceCount }} +// Generated IAM resources: {{ $.IAMResourceCount }} +// Total generated resources: {{ plus $.ResourceCount $.IAMResourceCount }} +var generatedResources = map[string]*schema.Resource{ + {{- range $object := $.ResourcesForVersion }} + {{- if $object.ResourceName }} + "{{ $object.TerraformName }}": {{ $object.ResourceName }}(), + {{- end }} + {{- if $object.IamClassName }} + "{{ $object.TerraformName }}_iam_binding": tpgiamresource.ResourceIamBinding({{ $object.IamClassName }}IamSchema, {{ $object.IamClassName }}IamUpdaterProducer, {{ $object.IamClassName }}IdParseFunc), + "{{ $object.TerraformName }}_iam_member": 
tpgiamresource.ResourceIamMember({{ $object.IamClassName }}IamSchema, {{ $object.IamClassName }}IamUpdaterProducer, {{ $object.IamClassName }}IdParseFunc), + "{{ $object.TerraformName }}_iam_policy": tpgiamresource.ResourceIamPolicy({{ $object.IamClassName }}IamSchema, {{ $object.IamClassName }}IamUpdaterProducer, {{ $object.IamClassName }}IdParseFunc), + {{- end }} + {{- end }} +} + +var handwrittenResources = map[string]*schema.Resource{ + // ####### START handwritten resources ########### + "google_app_engine_application": appengine.ResourceAppEngineApplication(), + "google_apigee_sharedflow": apigee.ResourceApigeeSharedFlow(), + "google_apigee_sharedflow_deployment": apigee.ResourceApigeeSharedFlowDeployment(), + "google_apigee_flowhook": apigee.ResourceApigeeFlowhook(), + "google_apigee_keystores_aliases_pkcs12": apigee.ResourceApigeeKeystoresAliasesPkcs12(), + "google_apigee_keystores_aliases_key_cert_file": apigee.ResourceApigeeKeystoresAliasesKeyCertFile(), + "google_bigquery_table": bigquery.ResourceBigQueryTable(), + "google_bigtable_gc_policy": bigtable.ResourceBigtableGCPolicy(), + "google_bigtable_instance": bigtable.ResourceBigtableInstance(), + "google_bigtable_table": bigtable.ResourceBigtableTable(), + "google_bigtable_authorized_view": bigtable.ResourceBigtableAuthorizedView(), + "google_billing_subaccount": resourcemanager.ResourceBillingSubaccount(), + "google_cloudfunctions_function": cloudfunctions.ResourceCloudFunctionsFunction(), + "google_composer_environment": composer.ResourceComposerEnvironment(), + {{- if ne $.TargetVersionName "ga" }} + "google_composer_user_workloads_secret": composer.ResourceComposerUserWorkloadsSecret(), + {{- end }} + "google_compute_attached_disk": compute.ResourceComputeAttachedDisk(), + "google_compute_instance": compute.ResourceComputeInstance(), + "google_compute_disk_async_replication": compute.ResourceComputeDiskAsyncReplication(), + "google_compute_router_peer": compute.ResourceComputeRouterBgpPeer(), + {{- 
if ne $.TargetVersionName "ga" }} + "google_compute_instance_from_machine_image": compute.ResourceComputeInstanceFromMachineImage(), + {{- end }} + "google_compute_instance_from_template": compute.ResourceComputeInstanceFromTemplate(), + "google_compute_instance_group": compute.ResourceComputeInstanceGroup(), + "google_compute_instance_group_manager": compute.ResourceComputeInstanceGroupManager(), + "google_compute_instance_template": compute.ResourceComputeInstanceTemplate(), + "google_compute_network_peering": compute.ResourceComputeNetworkPeering(), + "google_compute_project_default_network_tier": compute.ResourceComputeProjectDefaultNetworkTier(), + "google_compute_project_metadata": compute.ResourceComputeProjectMetadata(), + "google_compute_project_metadata_item": compute.ResourceComputeProjectMetadataItem(), + "google_compute_region_instance_group_manager": compute.ResourceComputeRegionInstanceGroupManager(), + "google_compute_region_instance_template": compute.ResourceComputeRegionInstanceTemplate(), + "google_compute_router_interface": compute.ResourceComputeRouterInterface(), + "google_compute_security_policy": compute.ResourceComputeSecurityPolicy(), + "google_compute_shared_vpc_host_project": compute.ResourceComputeSharedVpcHostProject(), + "google_compute_shared_vpc_service_project": compute.ResourceComputeSharedVpcServiceProject(), + "google_compute_target_pool": compute.ResourceComputeTargetPool(), + "google_container_cluster": container.ResourceContainerCluster(), + "google_container_node_pool": container.ResourceContainerNodePool(), + "google_container_registry": containeranalysis.ResourceContainerRegistry(), + "google_dataflow_job": dataflow.ResourceDataflowJob(), + {{- if ne $.TargetVersionName "ga" }} + "google_dataflow_flex_template_job": dataflow.ResourceDataflowFlexTemplateJob(), + {{- end }} + "google_dataproc_cluster": dataproc.ResourceDataprocCluster(), + "google_dataproc_job": dataproc.ResourceDataprocJob(), + "google_dns_record_set": 
dns.ResourceDnsRecordSet(), + "google_endpoints_service": servicemanagement.ResourceEndpointsService(), + "google_folder": resourcemanager.ResourceGoogleFolder(), + "google_folder_organization_policy": resourcemanager.ResourceGoogleFolderOrganizationPolicy(), + "google_logging_billing_account_sink": logging.ResourceLoggingBillingAccountSink(), + "google_logging_billing_account_exclusion": logging.ResourceLoggingExclusion(logging.BillingAccountLoggingExclusionSchema, logging.NewBillingAccountLoggingExclusionUpdater, logging.BillingAccountLoggingExclusionIdParseFunc), + "google_logging_billing_account_bucket_config": logging.ResourceLoggingBillingAccountBucketConfig(), + "google_logging_organization_sink": logging.ResourceLoggingOrganizationSink(), + "google_logging_organization_exclusion": logging.ResourceLoggingExclusion(logging.OrganizationLoggingExclusionSchema, logging.NewOrganizationLoggingExclusionUpdater, logging.OrganizationLoggingExclusionIdParseFunc), + "google_logging_organization_bucket_config": logging.ResourceLoggingOrganizationBucketConfig(), + "google_logging_folder_sink": logging.ResourceLoggingFolderSink(), + "google_logging_folder_exclusion": logging.ResourceLoggingExclusion(logging.FolderLoggingExclusionSchema, logging.NewFolderLoggingExclusionUpdater, logging.FolderLoggingExclusionIdParseFunc), + "google_logging_folder_bucket_config": logging.ResourceLoggingFolderBucketConfig(), + "google_logging_project_sink": logging.ResourceLoggingProjectSink(), + "google_logging_project_exclusion": logging.ResourceLoggingExclusion(logging.ProjectLoggingExclusionSchema, logging.NewProjectLoggingExclusionUpdater, logging.ProjectLoggingExclusionIdParseFunc), + "google_logging_project_bucket_config": logging.ResourceLoggingProjectBucketConfig(), + "google_monitoring_dashboard": monitoring.ResourceMonitoringDashboard(), + "google_os_config_os_policy_assignment": osconfig.ResourceOSConfigOSPolicyAssignment(), + {{- if ne $.TargetVersionName "ga" }} + 
"google_project_service_identity": resourcemanager.ResourceProjectServiceIdentity(), + {{- end }} + "google_service_networking_connection": servicenetworking.ResourceServiceNetworkingConnection(), + "google_sql_database_instance": sql.ResourceSqlDatabaseInstance(), + "google_sql_ssl_cert": sql.ResourceSqlSslCert(), + "google_sql_user": sql.ResourceSqlUser(), + "google_organization_iam_custom_role": resourcemanager.ResourceGoogleOrganizationIamCustomRole(), + "google_organization_policy": resourcemanager.ResourceGoogleOrganizationPolicy(), + "google_project": resourcemanager.ResourceGoogleProject(), + "google_project_default_service_accounts": resourcemanager.ResourceGoogleProjectDefaultServiceAccounts(), + "google_project_service": resourcemanager.ResourceGoogleProjectService(), + "google_project_iam_custom_role": resourcemanager.ResourceGoogleProjectIamCustomRole(), + "google_project_iam_member_remove": resourcemanager.ResourceGoogleProjectIamMemberRemove(), + "google_project_organization_policy": resourcemanager.ResourceGoogleProjectOrganizationPolicy(), + "google_project_usage_export_bucket": compute.ResourceProjectUsageBucket(), + {{- if ne $.TargetVersionName "ga" }} + "google_runtimeconfig_config": runtimeconfig.ResourceRuntimeconfigConfig(), + "google_runtimeconfig_variable": runtimeconfig.ResourceRuntimeconfigVariable(), + {{- end }} + "google_service_account": resourcemanager.ResourceGoogleServiceAccount(), + "google_service_account_key": resourcemanager.ResourceGoogleServiceAccountKey(), + "google_service_networking_peered_dns_domain": servicenetworking.ResourceGoogleServiceNetworkingPeeredDNSDomain(), + "google_storage_bucket": storage.ResourceStorageBucket(), + "google_storage_bucket_acl": storage.ResourceStorageBucketAcl(), + "google_storage_bucket_object": storage.ResourceStorageBucketObject(), + "google_storage_object_acl": storage.ResourceStorageObjectAcl(), + "google_storage_default_object_acl": storage.ResourceStorageDefaultObjectAcl(), + 
"google_storage_notification": storage.ResourceStorageNotification(), + "google_storage_transfer_job": storagetransfer.ResourceStorageTransferJob(), + "google_tags_location_tag_binding": tags.ResourceTagsLocationTagBinding(), + // ####### END handwritten resources ########### +} + +var handwrittenIAMResources = map[string]*schema.Resource{ + // ####### START non-generated IAM resources ########### + "google_bigtable_instance_iam_binding": tpgiamresource.ResourceIamBinding(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_instance_iam_member": tpgiamresource.ResourceIamMember(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_instance_iam_policy": tpgiamresource.ResourceIamPolicy(bigtable.IamBigtableInstanceSchema, bigtable.NewBigtableInstanceUpdater, bigtable.BigtableInstanceIdParseFunc), + "google_bigtable_table_iam_binding": tpgiamresource.ResourceIamBinding(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigtable_table_iam_member": tpgiamresource.ResourceIamMember(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigtable_table_iam_policy": tpgiamresource.ResourceIamPolicy(bigtable.IamBigtableTableSchema, bigtable.NewBigtableTableUpdater, bigtable.BigtableTableIdParseFunc), + "google_bigquery_dataset_iam_binding": tpgiamresource.ResourceIamBinding(bigquery.IamBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_bigquery_dataset_iam_member": tpgiamresource.ResourceIamMember(bigquery.IamMemberBigqueryDatasetSchema, bigquery.NewBigqueryDatasetIamMemberUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_bigquery_dataset_iam_policy": tpgiamresource.ResourceIamPolicy(bigquery.IamBigqueryDatasetSchema, 
bigquery.NewBigqueryDatasetIamUpdater, bigquery.BigqueryDatasetIdParseFunc), + "google_billing_account_iam_binding": tpgiamresource.ResourceIamBinding(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), + "google_billing_account_iam_member": tpgiamresource.ResourceIamMember(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), + "google_billing_account_iam_policy": tpgiamresource.ResourceIamPolicy(billing.IamBillingAccountSchema, billing.NewBillingAccountIamUpdater, billing.BillingAccountIdParseFunc), + "google_dataproc_cluster_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + "google_dataproc_cluster_iam_member": tpgiamresource.ResourceIamMember(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + "google_dataproc_cluster_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.IamDataprocClusterSchema, dataproc.NewDataprocClusterUpdater, dataproc.DataprocClusterIdParseFunc), + "google_dataproc_job_iam_binding": tpgiamresource.ResourceIamBinding(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater, dataproc.DataprocJobIdParseFunc), + "google_dataproc_job_iam_member": tpgiamresource.ResourceIamMember(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater, dataproc.DataprocJobIdParseFunc), + "google_dataproc_job_iam_policy": tpgiamresource.ResourceIamPolicy(dataproc.IamDataprocJobSchema, dataproc.NewDataprocJobUpdater, dataproc.DataprocJobIdParseFunc), + "google_folder_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, resourcemanager.FolderIdParseFunc), + "google_folder_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, 
resourcemanager.FolderIdParseFunc), + "google_folder_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, resourcemanager.FolderIdParseFunc), + "google_folder_iam_audit_config": tpgiamresource.ResourceIamAuditConfig(resourcemanager.IamFolderSchema, resourcemanager.NewFolderIamUpdater, resourcemanager.FolderIdParseFunc), + "google_healthcare_dataset_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater, healthcare.DatasetIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dataset_iam_member": tpgiamresource.ResourceIamMember(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater, healthcare.DatasetIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dataset_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareDatasetSchema, healthcare.NewHealthcareDatasetIamUpdater, healthcare.DatasetIdParseFunc), + "google_healthcare_dicom_store_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater, healthcare.DicomStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dicom_store_iam_member": tpgiamresource.ResourceIamMember(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater, healthcare.DicomStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_dicom_store_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareDicomStoreSchema, healthcare.NewHealthcareDicomStoreIamUpdater, healthcare.DicomStoreIdParseFunc), + "google_healthcare_fhir_store_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater, healthcare.FhirStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_fhir_store_iam_member": 
tpgiamresource.ResourceIamMember(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater, healthcare.FhirStoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_fhir_store_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareFhirStoreSchema, healthcare.NewHealthcareFhirStoreIamUpdater, healthcare.FhirStoreIdParseFunc), + "google_healthcare_hl7_v2_store_iam_binding": tpgiamresource.ResourceIamBinding(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater, healthcare.Hl7V2StoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_hl7_v2_store_iam_member": tpgiamresource.ResourceIamMember(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater, healthcare.Hl7V2StoreIdParseFunc, tpgiamresource.IamWithBatching), + "google_healthcare_hl7_v2_store_iam_policy": tpgiamresource.ResourceIamPolicy(healthcare.IamHealthcareHl7V2StoreSchema, healthcare.NewHealthcareHl7V2StoreIamUpdater, healthcare.Hl7V2StoreIdParseFunc), + "google_kms_key_ring_iam_binding": tpgiamresource.ResourceIamBinding(kms.IamKmsKeyRingSchema, kms.NewKmsKeyRingIamUpdater, kms.KeyRingIdParseFunc), + "google_kms_key_ring_iam_member": tpgiamresource.ResourceIamMember(kms.IamKmsKeyRingSchema, kms.NewKmsKeyRingIamUpdater, kms.KeyRingIdParseFunc), + "google_kms_key_ring_iam_policy": tpgiamresource.ResourceIamPolicy(kms.IamKmsKeyRingSchema, kms.NewKmsKeyRingIamUpdater, kms.KeyRingIdParseFunc), + "google_kms_crypto_key_iam_binding": tpgiamresource.ResourceIamBinding(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater, kms.CryptoIdParseFunc), + "google_kms_crypto_key_iam_member": tpgiamresource.ResourceIamMember(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater, kms.CryptoIdParseFunc), + "google_kms_crypto_key_iam_policy": tpgiamresource.ResourceIamPolicy(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater, kms.CryptoIdParseFunc), + 
"google_spanner_instance_iam_binding": tpgiamresource.ResourceIamBinding(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater, spanner.SpannerInstanceIdParseFunc), + "google_spanner_instance_iam_member": tpgiamresource.ResourceIamMember(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater, spanner.SpannerInstanceIdParseFunc), + "google_spanner_instance_iam_policy": tpgiamresource.ResourceIamPolicy(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater, spanner.SpannerInstanceIdParseFunc), + "google_spanner_database_iam_binding": tpgiamresource.ResourceIamBinding(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), + "google_spanner_database_iam_member": tpgiamresource.ResourceIamMember(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), + "google_spanner_database_iam_policy": tpgiamresource.ResourceIamPolicy(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), + "google_storage_managed_folder_iam_binding": tpgiamresource.ResourceIamBinding(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer, storage.StorageManagedFolderIdParseFunc), + "google_storage_managed_folder_iam_member": tpgiamresource.ResourceIamMember(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer, storage.StorageManagedFolderIdParseFunc), + "google_storage_managed_folder_iam_policy": tpgiamresource.ResourceIamPolicy(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer, storage.StorageManagedFolderIdParseFunc), + "google_organization_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_organization_iam_member": 
tpgiamresource.ResourceIamMember(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_organization_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_organization_iam_audit_config": tpgiamresource.ResourceIamAuditConfig(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), + "google_project_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc), + "google_project_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc, tpgiamresource.IamWithBatching), + "google_project_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc, tpgiamresource.IamWithBatching), + "google_project_iam_audit_config": tpgiamresource.ResourceIamAuditConfig(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater, resourcemanager.ProjectIdParseFunc, tpgiamresource.IamWithBatching), + "google_pubsub_subscription_iam_binding": tpgiamresource.ResourceIamBinding(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater, pubsub.PubsubSubscriptionIdParseFunc), + "google_pubsub_subscription_iam_member": tpgiamresource.ResourceIamMember(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater, pubsub.PubsubSubscriptionIdParseFunc), + "google_pubsub_subscription_iam_policy": tpgiamresource.ResourceIamPolicy(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater, pubsub.PubsubSubscriptionIdParseFunc), + "google_service_account_iam_binding": 
tpgiamresource.ResourceIamBinding(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), + "google_service_account_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), + "google_service_account_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), + // ####### END non-generated IAM resources ########### +} diff --git a/mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl b/mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl index 26df594b3768..fb6619b5d5ed 100644 --- a/mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl +++ b/mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl @@ -1312,7 +1312,6 @@ resource "google_cloud_run_service" "default" { `, context) } -{{ if ne $.TargetVersionName `ga` -}} func TestAccCloudRunService_csiVolume(t *testing.T) { acctest.SkipIfVcr(t) @@ -1323,10 +1322,10 @@ func TestAccCloudRunService_csiVolume(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project), + Config: testAccCloudRunService_cloudRunServiceWithNoVolume(name, project), }, { ResourceName: "google_cloud_run_service.default", @@ -1348,10 +1347,9 @@ func TestAccCloudRunService_csiVolume(t *testing.T) { } -func testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project string) string { +func 
testAccCloudRunService_cloudRunServiceWithNoVolume(name, project string) string { return fmt.Sprintf(` resource "google_cloud_run_service" "default" { - provider = google-beta name = "%s" location = "us-central1" @@ -1359,7 +1357,6 @@ resource "google_cloud_run_service" "default" { namespace = "%s" annotations = { generated-by = "magic-modules" - "run.googleapis.com/launch-stage" = "BETA" } } @@ -1367,14 +1364,6 @@ resource "google_cloud_run_service" "default" { spec { containers { image = "gcr.io/cloudrun/hello" - volume_mounts { - name = "vol1" - mount_path = "/mnt/vol1" - } - } - volumes { - name = "vol1" - empty_dir { size_limit = "256Mi" } } } } @@ -1388,10 +1377,10 @@ resource "google_cloud_run_service" "default" { `, name, project) } + func testAccCloudRunService_cloudRunServiceUpdateWithGcsVolume(name, project string) string { return fmt.Sprintf(` resource "google_cloud_run_service" "default" { - provider = google-beta name = "%s" location = "us-central1" @@ -1399,7 +1388,6 @@ resource "google_cloud_run_service" "default" { namespace = "%s" annotations = { generated-by = "magic-modules" - "run.googleapis.com/launch-stage" = "BETA" } } @@ -1430,13 +1418,68 @@ resource "google_cloud_run_service" "default" { } } - lifecycle { - ignore_changes = [ - metadata.0.annotations, - ] - } } `, name, project) } -{{ end }} + {{ if ne $.TargetVersionName `ga` -}} + +func TestAccCloudRunService_emptyDirVolume(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) + } + + +func testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + provider = google-beta + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + "run.googleapis.com/launch-stage" = "BETA" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + volume_mounts { + name = "vol1" + mount_path = "/mnt/vol1" + } + } + volumes { + name = "vol1" + empty_dir { size_limit = "256Mi" } + } + } + } + +} +`, name, project) +} + {{- end }} diff --git a/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl index 849e6a9df32c..2100167eb985 100644 --- a/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl +++ b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl @@ -308,7 +308,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithDirectVPCAndNamedBinAuthPolicyUpdate( `, context) } -{{ if ne $.TargetVersionName `ga` -}} func TestAccCloudRunV2Job_cloudrunv2JobWithGcsUpdate(t *testing.T) { acctest.SkipIfVcr(t) t.Parallel() @@ -351,7 +350,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithNoVolume(context map[string]interface name = "%{job_name}" location = "us-central1" deletion_protection = false - launch_stage = "BETA" template { template { containers { @@ -375,7 +373,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithGcsVolume(context map[string]interfac name = "%{job_name}" location = "us-central1" deletion_protection = false - launch_stage = "BETA" template { template { containers { @@ -394,6 +391,11 @@ func 
testAccCloudRunV2Job_cloudrunv2JobWithGcsVolume(context map[string]interfac } } } + lifecycle { + ignore_changes = [ + launch_stage, + ] + } } `, context) } @@ -440,7 +442,6 @@ func testAccCloudRunV2Job_cloudrunv2JobWithNfsVolume(context map[string]interfac name = "%{job_name}" location = "us-central1" deletion_protection = false - launch_stage = "BETA" template { template { containers { @@ -460,10 +461,16 @@ func testAccCloudRunV2Job_cloudrunv2JobWithNfsVolume(context map[string]interfac } } } + lifecycle { + ignore_changes = [ + launch_stage, + ] + } } `, context) } +{{ if ne $.TargetVersionName `ga` -}} func TestAccCloudRunV2Job_cloudrunv2JobWithStartExecutionTokenUpdate(t *testing.T) { t.Parallel() diff --git a/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl index 1da7d73b47ea..96026c217727 100644 --- a/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl +++ b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl @@ -209,7 +209,6 @@ resource "google_compute_network" "custom_test" { } `, context) } -{{- if ne $.TargetVersionName "ga" }} func TestAccCloudRunV2Service_cloudrunv2ServiceGcsVolume(t *testing.T) { acctest.SkipIfVcr(t) t.Parallel() @@ -243,7 +242,7 @@ resource "google_cloud_run_v2_service" "default" { description = "description creating" location = "us-central1" deletion_protection = false - launch_stage = "BETA" + annotations = { generated-by = "magic-modules" } @@ -312,7 +311,6 @@ resource "google_service_account" "service_account" { } `, context) } -{{- end }} func TestAccCloudRunV2Service_cloudrunv2ServiceTCPProbesUpdate(t *testing.T) { t.Parallel() @@ -1075,3 +1073,124 @@ resource "google_cloud_run_v2_service" "default" { `, context) } {{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func 
TestAccCloudRunV2Service_cloudrunv2ServiceMeshUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceMesh(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels", "launch_stage", "deletion_protection"}, + }, + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceMeshUpdate(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels", "launch_stage", "deletion_protection"}, + }, + }, + }) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceMesh(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + provider = google-beta + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + deletion_protection = false + depends_on = [time_sleep.wait_for_mesh] + launch_stage = "BETA" + template { + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + service_mesh { + mesh = google_network_services_mesh.mesh.id + } + } +} + +resource "time_sleep" "wait_for_mesh" { + depends_on = [ + google_network_services_mesh.mesh, + google_network_services_mesh.new_mesh, + ] + + create_duration = "1m" +} + +resource "google_network_services_mesh" "mesh" { + provider = google-beta + name 
= "tf-test-mesh%{random_suffix}" +} + +resource "google_network_services_mesh" "new_mesh" { + provider = google-beta + name = "tf-test-new-mesh%{random_suffix}" +} +`, context) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceMeshUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + provider = google-beta + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + deletion_protection = false + depends_on = [time_sleep.wait_for_mesh] + launch_stage = "BETA" + template { + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + service_mesh { + mesh = google_network_services_mesh.new_mesh.id + } + } +} + +resource "time_sleep" "wait_for_mesh" { + depends_on = [ + google_network_services_mesh.mesh, + google_network_services_mesh.new_mesh, + ] + + create_duration = "1m" +} + +resource "google_network_services_mesh" "mesh" { + provider = google-beta + name = "tf-test-mesh%{random_suffix}" +} + +resource "google_network_services_mesh" "new_mesh" { + provider = google-beta + name = "tf-test-new-mesh%{random_suffix}" +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl index 3ef44a99803d..5d01040945e0 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl @@ -419,7 +419,7 @@ func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta interface{} advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + } else 
if v, ok := d.GetOk("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { obj["advertisedRoutePriority"] = advertisedRoutePriorityProp } advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) @@ -772,7 +772,7 @@ func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{} advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + } else if v, ok := d.GetOk("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { obj["advertisedRoutePriority"] = advertisedRoutePriorityProp } advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) diff --git a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl index eded879ff7ee..179d06855493 100644 --- a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl @@ -110,7 +110,6 @@ func schemaGcfsConfig() *schema.Schema { "enabled": { Type: schema.TypeBool, Required: true, - ForceNew: forceNew, Description: `Whether or not GCFS is enabled`, }, }, diff --git a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go index d8b0d19a4763..e1fa43b702d5 100644 --- a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go +++ b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go @@ -708,6 +708,7 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error return err }, Timeout: 
d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429RetryableQuotaError}, }) if err != nil { diff --git a/mmv1/third_party/terraform/sweeper/go/gcp_sweeper_test.go.tmpl b/mmv1/third_party/terraform/sweeper/go/gcp_sweeper_test.go.tmpl new file mode 100644 index 000000000000..c10152aabb2a --- /dev/null +++ b/mmv1/third_party/terraform/sweeper/go/gcp_sweeper_test.go.tmpl @@ -0,0 +1,30 @@ + +package sweeper_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + +{{- range $product := $.Products }} + _ "github.com/hashicorp/terraform-provider-google/google/services/{{ lower $product.Name }}" +{{- end }} + + // Manually add the services for DCL resource and handwritten resource sweepers if they are not in the above list + _ "github.com/hashicorp/terraform-provider-google/google/services/apikeys" + _ "github.com/hashicorp/terraform-provider-google/google/services/clouddeploy" + _ "github.com/hashicorp/terraform-provider-google/google/services/composer" + _ "github.com/hashicorp/terraform-provider-google/google/services/container" + _ "github.com/hashicorp/terraform-provider-google/google/services/containeraws" + _ "github.com/hashicorp/terraform-provider-google/google/services/containerazure" + _ "github.com/hashicorp/terraform-provider-google/google/services/dataflow" + _ "github.com/hashicorp/terraform-provider-google/google/services/eventarc" + _ "github.com/hashicorp/terraform-provider-google/google/services/firebase" + _ "github.com/hashicorp/terraform-provider-google/google/services/firebaserules" + _ "github.com/hashicorp/terraform-provider-google/google/services/networkconnectivity" + _ "github.com/hashicorp/terraform-provider-google/google/services/recaptchaenterprise" +) + +func TestMain(m *testing.M) { + resource.TestMain(m) +} diff --git a/mmv1/third_party/terraform/transport/go/config.go.tmpl b/mmv1/third_party/terraform/transport/go/config.go.tmpl new 
file mode 100644 index 000000000000..94506f6230c9 --- /dev/null +++ b/mmv1/third_party/terraform/transport/go/config.go.tmpl @@ -0,0 +1,1354 @@ +package transport + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "regexp" + "strconv" + "strings" + "time" + "os" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + + grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/sirupsen/logrus" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + + "github.com/hashicorp/terraform-provider-google/google/verify" + + "golang.org/x/oauth2" + "google.golang.org/grpc" + googleoauth "golang.org/x/oauth2/google" + appengine "google.golang.org/api/appengine/v1" + "google.golang.org/api/bigquery/v2" + "google.golang.org/api/bigtableadmin/v2" + "google.golang.org/api/certificatemanager/v1" + "google.golang.org/api/cloudbilling/v1" + "google.golang.org/api/cloudbuild/v1" +{{- if ne $.TargetVersionName "ga" }} + cloudidentity "google.golang.org/api/cloudidentity/v1beta1" +{{- else }} + "google.golang.org/api/cloudidentity/v1" +{{- end }} + "google.golang.org/api/cloudfunctions/v1" + "google.golang.org/api/cloudiot/v1" + "google.golang.org/api/cloudkms/v1" + "google.golang.org/api/cloudresourcemanager/v1" + resourceManagerV3 "google.golang.org/api/cloudresourcemanager/v3" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/composer/v1" +{{- else }} + "google.golang.org/api/composer/v1beta1" +{{- end }} +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +{{- if eq $.TargetVersionName "ga" }} + 
"google.golang.org/api/container/v1" +{{- else }} + container "google.golang.org/api/container/v1beta1" +{{- end }} + dataflow "google.golang.org/api/dataflow/v1b3" + "google.golang.org/api/dataproc/v1" + "google.golang.org/api/dns/v1" + healthcare "google.golang.org/api/healthcare/v1" + "google.golang.org/api/iam/v1" + iamcredentials "google.golang.org/api/iamcredentials/v1" + cloudlogging "google.golang.org/api/logging/v2" + "google.golang.org/api/pubsub/v1" + runadminv2 "google.golang.org/api/run/v2" +{{- if ne $.TargetVersionName "ga" }} + runtimeconfig "google.golang.org/api/runtimeconfig/v1beta1" +{{- end }} + "google.golang.org/api/servicemanagement/v1" + "google.golang.org/api/servicenetworking/v1" + "google.golang.org/api/serviceusage/v1" + "google.golang.org/api/sourcerepo/v1" + "google.golang.org/api/spanner/v1" + sqladmin "google.golang.org/api/sqladmin/v1beta4" + "google.golang.org/api/storage/v1" + "google.golang.org/api/storagetransfer/v1" + "google.golang.org/api/transport" +) + +type ProviderMeta struct { + ModuleName string `cty:"module_name"` +} + +type Formatter struct { + TimestampFormat string + LogFormat string +} + +// Borrowed logic from https://github.com/sirupsen/logrus/blob/master/json_formatter.go and https://github.com/t-tomalak/logrus-easy-formatter/blob/master/formatter.go +func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { + // Suppress logs if TF_LOG is not DEBUG or TRACE + if !logging.IsDebugOrHigher() { + return nil, nil + } + + // Also suppress based on log content + // - frequent transport spam + // - ListenSocket logs from gRPC + isTransportSpam := strings.Contains(entry.Message, "transport is closing") + listenSocketRegex := regexp.MustCompile(`\[Server #\d+( ListenSocket #\d+)*\]`) // Match patterns like `[Server #00]` or `[Server #00 ListenSocket #00]` + isListenSocketLog := listenSocketRegex.MatchString(entry.Message) + if isTransportSpam || isListenSocketLog { + return nil, nil + } + + output := f.LogFormat 
+ entry.Level = logrus.DebugLevel // Force Entries to be Debug + + timestampFormat := f.TimestampFormat + + output = strings.Replace(output, "%time%", entry.Time.Format(timestampFormat), 1) + + output = strings.Replace(output, "%msg%", entry.Message, 1) + + level := strings.ToUpper(entry.Level.String()) + output = strings.Replace(output, "%lvl%", level, 1) + + var gRPCMessageFlag bool + for k, val := range entry.Data { + switch v := val.(type) { + case string: + output = strings.Replace(output, "%"+k+"%", v, 1) + case int: + s := strconv.Itoa(v) + output = strings.Replace(output, "%"+k+"%", s, 1) + case bool: + s := strconv.FormatBool(v) + output = strings.Replace(output, "%"+k+"%", s, 1) + } + + if k != "system" { + gRPCMessageFlag = true + } + } + + if gRPCMessageFlag { + data := make(logrus.Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetIndent("", " ") + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + finalOutput := append([]byte(output), b.Bytes()...) + return finalOutput, nil + } + + return []byte(output), nil +} + +// Config is the configuration structure used to instantiate the Google +// provider. 
+type Config struct { + DCLConfig + AccessToken string + Credentials string + ImpersonateServiceAccount string + ImpersonateServiceAccountDelegates []string + Project string + Region string + BillingProject string + Zone string + UniverseDomain string + Scopes []string + BatchingConfig *BatchingConfig + UserProjectOverride bool + RequestReason string + RequestTimeout time.Duration + DefaultLabels map[string]string + AddTerraformAttributionLabel bool + TerraformAttributionLabelAdditionStrategy string + // PollInterval is passed to retry.StateChangeConf in common_operation.go + // It controls the interval at which we poll for successful operations + PollInterval time.Duration + + Client *http.Client + Context context.Context + UserAgent string + gRPCLoggingOptions []option.ClientOption + + tokenSource oauth2.TokenSource + + {{ range $product := $.Products }} + {{ $product.Name }}BasePath string + {{- end }} + + CloudBillingBasePath string + ContainerBasePath string + DataflowBasePath string + IamCredentialsBasePath string + ResourceManagerV3BasePath string + IAMBasePath string + CloudIoTBasePath string + BigtableAdminBasePath string + TagsLocationBasePath string + + // dcl + ContainerAwsBasePath string + ContainerAzureBasePath string + + RequestBatcherServiceUsage *RequestBatcher + RequestBatcherIam *RequestBatcher +} + +{{- range $product := $.Products }} +const {{ $product.Name }}BasePathKey = "{{ $product.Name }}" +{{- end }} +const CloudBillingBasePathKey = "CloudBilling" +const ContainerBasePathKey = "Container" +const DataflowBasePathKey = "Dataflow" +const IAMBasePathKey = "IAM" +const IamCredentialsBasePathKey = "IamCredentials" +const ResourceManagerV3BasePathKey = "ResourceManagerV3" +const BigtableAdminBasePathKey = "BigtableAdmin" +const ContainerAwsBasePathKey = "ContainerAws" +const ContainerAzureBasePathKey = "ContainerAzure" +const TagsLocationBasePathKey = "TagsLocation" + +// Generated product base paths +var DefaultBasePaths = map[string]string{ 
+{{- range $product := $.Products }} + {{ $product.Name }}BasePathKey : "{{ $product.BaseUrl }}", +{{- end }} + CloudBillingBasePathKey : "https://cloudbilling.googleapis.com/v1/", +{{- if eq $.TargetVersionName "ga" }} + ContainerBasePathKey : "https://container.googleapis.com/v1/", +{{- else }} + ContainerBasePathKey : "https://container.googleapis.com/v1beta1/", +{{- end }} + DataflowBasePathKey : "https://dataflow.googleapis.com/v1b3/", + IAMBasePathKey : "https://iam.googleapis.com/v1/", + IamCredentialsBasePathKey : "https://iamcredentials.googleapis.com/v1/", + ResourceManagerV3BasePathKey : "https://cloudresourcemanager.googleapis.com/v3/", + BigtableAdminBasePathKey : "https://bigtableadmin.googleapis.com/v2/", + ContainerAwsBasePathKey: "https://{{"{{"}}location{{"}}"}}-gkemulticloud.googleapis.com/v1/", + ContainerAzureBasePathKey: "https://{{"{{"}}location{{"}}"}}-gkemulticloud.googleapis.com/v1/", + TagsLocationBasePathKey: "https://{{"{{"}}location{{"}}"}}-cloudresourcemanager.googleapis.com/v3/", +} + +var DefaultClientScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", +} + +const AttributionKey = "goog-terraform-provisioned" +const AttributionValue = "true" +const CreateOnlyAttributionStrategy = "CREATION_ONLY" +const ProactiveAttributionStrategy = "PROACTIVE" + +func HandleSDKDefaults(d *schema.ResourceData) error { + if d.Get("impersonate_service_account") == "" { + d.Set("impersonate_service_account", MultiEnvDefault([]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", + }, nil)) + } + + if d.Get("project") == "" { + d.Set("project", MultiEnvDefault([]string{ + "GOOGLE_PROJECT", + "GOOGLE_CLOUD_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }, nil)) + } + + if d.Get("billing_project") == "" { + d.Set("billing_project", MultiEnvDefault([]string{ + "GOOGLE_BILLING_PROJECT", + }, nil)) + } + + if d.Get("region") == "" { + d.Set("region", MultiEnvDefault([]string{ + 
"GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", + }, nil)) + } + + if d.Get("zone") == "" { + d.Set("zone", MultiEnvDefault([]string{ + "GOOGLE_ZONE", + "GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE", + }, nil)) + } + + if _, ok := d.GetOkExists("user_project_override"); !ok { + override := MultiEnvDefault([]string{ + "USER_PROJECT_OVERRIDE", + }, nil) + + if override != nil { + b, err := strconv.ParseBool(override.(string)) + if err != nil { + return err + } + d.Set("user_project_override", b) + } + } + + if d.Get("request_reason") == "" { + d.Set("request_reason", MultiEnvDefault([]string{ + "CLOUDSDK_CORE_REQUEST_REASON", + }, nil)) + } + return nil +} + +func SetEndpointDefaults(d *schema.ResourceData) error { + // Generated Products + {{- range $product := $.Products }} + if d.Get("{{ underscore $product.Name }}_custom_endpoint") == "" { + d.Set("{{ underscore $product.Name }}_custom_endpoint", MultiEnvDefault([]string{ + "GOOGLE_{{ upper (underscore $product.Name) }}_CUSTOM_ENDPOINT", + }, DefaultBasePaths[{{ $product.Name }}BasePathKey])) + } + {{- end }} + + if d.Get(CloudBillingCustomEndpointEntryKey) == "" { + d.Set(CloudBillingCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CLOUD_BILLING_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudBillingBasePathKey])) + } + + if d.Get(ComposerCustomEndpointEntryKey) == "" { + d.Set(ComposerCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_COMPOSER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ComposerBasePathKey])) + } + + if d.Get(ContainerCustomEndpointEntryKey) == "" { + d.Set(ContainerCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CONTAINER_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerBasePathKey])) + } + + if d.Get(DataflowCustomEndpointEntryKey) == "" { + d.Set(DataflowCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_DATAFLOW_CUSTOM_ENDPOINT", + }, DefaultBasePaths[DataflowBasePathKey])) + } + + if d.Get(IamCredentialsCustomEndpointEntryKey) == "" { + 
d.Set(IamCredentialsCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_IAM_CREDENTIALS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IamCredentialsBasePathKey])) + } + + if d.Get(ResourceManagerV3CustomEndpointEntryKey) == "" { + d.Set(ResourceManagerV3CustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_RESOURCE_MANAGER_V3_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ResourceManagerV3BasePathKey])) + } + + {{ if ne $.TargetVersionName `ga` -}} + if d.Get(RuntimeConfigCustomEndpointEntryKey) == "" { + d.Set(RuntimeConfigCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_RUNTIMECONFIG_CUSTOM_ENDPOINT", + }, DefaultBasePaths[RuntimeConfigBasePathKey])) + } + {{- end }} + + if d.Get(IAMCustomEndpointEntryKey) == "" { + d.Set(IAMCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_IAM_CUSTOM_ENDPOINT", + }, DefaultBasePaths[IAMBasePathKey])) + } + + if d.Get(ServiceNetworkingCustomEndpointEntryKey) == "" { + d.Set(ServiceNetworkingCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_SERVICE_NETWORKING_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ServiceNetworkingBasePathKey])) + } + + if d.Get(TagsLocationCustomEndpointEntryKey) == "" { + d.Set(TagsLocationCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_TAGS_LOCATION_CUSTOM_ENDPOINT", + }, DefaultBasePaths[TagsLocationBasePathKey])) + } + + if d.Get(ContainerAwsCustomEndpointEntryKey) == "" { + d.Set(ContainerAwsCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CONTAINERAWS_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerAwsBasePathKey])) + } + + if d.Get(ContainerAzureCustomEndpointEntryKey) == "" { + d.Set(ContainerAzureCustomEndpointEntryKey, MultiEnvDefault([]string{ + "GOOGLE_CONTAINERAZURE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ContainerAzureBasePathKey])) + } + + return nil +} + +func (c *Config) LoadAndValidate(ctx context.Context) error { + if len(c.Scopes) == 0 { + c.Scopes = DefaultClientScopes + } + + c.Context = ctx + + tokenSource, err := 
c.getTokenSource(c.Scopes, false) + if err != nil { + return err + } + + c.tokenSource = tokenSource + + cleanCtx := context.WithValue(ctx, oauth2.HTTPClient, cleanhttp.DefaultClient()) + + // 1. MTLS TRANSPORT/CLIENT - sets up proper auth headers + client, _, err := transport.NewHTTPClient(cleanCtx, option.WithTokenSource(tokenSource)) + if err != nil { + return err + } + + // Userinfo is fetched before request logging is enabled to reduce additional noise. + err = c.logGoogleIdentities() + if err != nil { + return err + } + + // 2. Logging Transport - ensure we log HTTP requests to GCP APIs. + loggingTransport := logging.NewTransport("Google", client.Transport) + + // 3. Retry Transport - retries common temporary errors + // Keep order for wrapping logging so we log each retried request as well. + // This value should be used if needed to create shallow copies with additional retry predicates. + // See ClientWithAdditionalRetries + retryTransport := NewTransportWithDefaultRetries(loggingTransport) + + // 4. Header Transport - outer wrapper to inject additional headers we want to apply + // before making requests + headerTransport := NewTransportWithHeaders(retryTransport) + if c.RequestReason != "" { + headerTransport.Set("X-Goog-Request-Reason", c.RequestReason) + } + + // Ensure $userProject is set for all HTTP requests using the client if specified by the provider config + // See https://cloud.google.com/apis/docs/system-parameters + if c.UserProjectOverride && c.BillingProject != "" { + headerTransport.Set("X-Goog-User-Project", c.BillingProject) + } + + // Set final transport value. + client.Transport = headerTransport + + // This timeout is a timeout per HTTP request, not per logical operation. 
+ client.Timeout = c.synchronousTimeout() + + c.Client = client + c.Context = ctx + c.Region = GetRegionFromRegionSelfLink(c.Region) + c.RequestBatcherServiceUsage = NewRequestBatcher("Service Usage", ctx, c.BatchingConfig) + c.RequestBatcherIam = NewRequestBatcher("IAM", ctx, c.BatchingConfig) + c.PollInterval = 10 * time.Second + + // gRPC Logging setup + logger := logrus.StandardLogger() + + logrus.SetLevel(logrus.DebugLevel) + logrus.SetFormatter(&Formatter{ + TimestampFormat: "2006/01/02 15:04:05", + LogFormat: "%time% [%lvl%] %msg% \n", + }) + + alwaysLoggingDeciderClient := func(ctx context.Context, fullMethodName string) bool { return true } + grpc_logrus.ReplaceGrpcLogger(logrus.NewEntry(logger)) + + c.gRPCLoggingOptions = append( + c.gRPCLoggingOptions, option.WithGRPCDialOption(grpc.WithUnaryInterceptor( + grpc_logrus.PayloadUnaryClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + option.WithGRPCDialOption(grpc.WithStreamInterceptor( + grpc_logrus.PayloadStreamClientInterceptor(logrus.NewEntry(logger), alwaysLoggingDeciderClient))), + ) + + return nil +} + +func ExpandProviderBatchingConfig(v interface{}) (*BatchingConfig, error) { + config := &BatchingConfig{ + SendAfter: time.Second * DefaultBatchSendIntervalSec, + EnableBatching: true, + } + + if v == nil { + return config, nil + } + ls := v.([]interface{}) + if len(ls) == 0 || ls[0] == nil { + return config, nil + } + + cfgV := ls[0].(map[string]interface{}) + if sendAfterV, ok := cfgV["send_after"]; ok && sendAfterV != "" { + SendAfter, err := time.ParseDuration(sendAfterV.(string)) + if err != nil { + return nil, fmt.Errorf("unable to parse duration from 'send_after' value %q", sendAfterV) + } + config.SendAfter = SendAfter + } + + if enable, ok := cfgV["enable_batching"]; ok { + config.EnableBatching = enable.(bool) + } + + return config, nil +} + +func (c *Config) synchronousTimeout() time.Duration { + if c.RequestTimeout == 0 { + return 120 * time.Second + } + return 
c.RequestTimeout +} + +// Print Identities executing terraform API Calls. +func (c *Config) logGoogleIdentities() error { + if c.ImpersonateServiceAccount == "" { + + tokenSource, err := c.getTokenSource(c.Scopes, true) + if err != nil { + return err + } + c.Client = oauth2.NewClient(c.Context, tokenSource) // c.Client isn't initialised fully when this code is called. + + email, err := GetCurrentUserEmail(c, c.UserAgent) + if err != nil { + log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + } + + log.Printf("[INFO] Terraform is using this identity: %s", email) + + return nil + + } + + // Drop Impersonated ClientOption from OAuth2 TokenSource to infer original identity + + tokenSource, err := c.getTokenSource(c.Scopes, true) + if err != nil { + return err + } + c.Client = oauth2.NewClient(c.Context, tokenSource) // c.Client isn't initialised fully when this code is called. + + email, err := GetCurrentUserEmail(c, c.UserAgent) + if err != nil { + log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + } + + log.Printf("[INFO] Terraform is configured with service account impersonation, original identity: %s, impersonated identity: %s", email, c.ImpersonateServiceAccount) + + // Add the Impersonated ClientOption back in to the OAuth2 TokenSource + + tokenSource, err = c.getTokenSource(c.Scopes, false) + if err != nil { + return err + } + c.Client = oauth2.NewClient(c.Context, tokenSource) // c.Client isn't initialised fully when this code is called. + + return nil +} + +// Get a TokenSource based on the Google Credentials configured. +// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds. 
+func (c *Config) getTokenSource(clientScopes []string, initialCredentialsOnly bool) (oauth2.TokenSource, error) { + creds, err := c.GetCredentials(clientScopes, initialCredentialsOnly) + if err != nil { + return nil, fmt.Errorf("%s", err) + } + return creds.TokenSource, nil +} + +// Methods to create new services from config +// Some base paths below need the version and possibly more of the path +// set on them. The client libraries are inconsistent about which values they need; +// while most only want the host URL, some older ones also want the version and some +// of those "projects" as well. You can find out if this is required by looking at +// the basePath value in the client library file. +func (c *Config) NewCertificateManagerClient(userAgent string) *certificatemanager.Service { + certificateManagerClientBasePath := RemoveBasePathVersion(c.CertificateManagerBasePath) + log.Printf("[INFO] Instantiating Certificate Manager client for path %s", certificateManagerClientBasePath) + clientCertificateManager, err := certificatemanager.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client certificate manager: %s", err) + return nil + } + clientCertificateManager.UserAgent = userAgent + clientCertificateManager.BasePath = certificateManagerClientBasePath + + return clientCertificateManager +} + +func (c *Config) NewComputeClient(userAgent string) *compute.Service { + log.Printf("[INFO] Instantiating GCE client for path %s", c.ComputeBasePath) + clientCompute, err := compute.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client compute: %s", err) + return nil + } + clientCompute.UserAgent = userAgent + clientCompute.BasePath = c.ComputeBasePath + + return clientCompute +} + +func (c *Config) NewContainerClient(userAgent string) *container.Service { + containerClientBasePath := RemoveBasePathVersion(c.ContainerBasePath) + log.Printf("[INFO] 
Instantiating GKE client for path %s", containerClientBasePath) + clientContainer, err := container.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client container: %s", err) + return nil + } + clientContainer.UserAgent = userAgent + clientContainer.BasePath = containerClientBasePath + + return clientContainer +} + +func (c *Config) NewDnsClient(userAgent string) *dns.Service { + dnsClientBasePath := RemoveBasePathVersion(c.DNSBasePath) + dnsClientBasePath = strings.ReplaceAll(dnsClientBasePath, "/dns/", "") + log.Printf("[INFO] Instantiating Google Cloud DNS client for path %s", dnsClientBasePath) + clientDns, err := dns.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client dns: %s", err) + return nil + } + clientDns.UserAgent = userAgent + clientDns.BasePath = dnsClientBasePath + + return clientDns +} + +func (c *Config) NewKmsClientWithCtx(ctx context.Context, userAgent string) *cloudkms.Service { + kmsClientBasePath := RemoveBasePathVersion(c.KMSBasePath) + log.Printf("[INFO] Instantiating Google Cloud KMS client for path %s", kmsClientBasePath) + clientKms, err := cloudkms.NewService(ctx, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client kms: %s", err) + return nil + } + clientKms.UserAgent = userAgent + clientKms.BasePath = kmsClientBasePath + + return clientKms +} + +func (c *Config) NewKmsClient(userAgent string) *cloudkms.Service { + return c.NewKmsClientWithCtx(c.Context, userAgent) +} + +func (c *Config) NewLoggingClient(userAgent string) *cloudlogging.Service { + loggingClientBasePath := RemoveBasePathVersion(c.LoggingBasePath) + log.Printf("[INFO] Instantiating Google Stackdriver Logging client for path %s", loggingClientBasePath) + clientLogging, err := cloudlogging.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client 
logging: %s", err) + return nil + } + clientLogging.UserAgent = userAgent + clientLogging.BasePath = loggingClientBasePath + + return clientLogging +} + +func (c *Config) NewStorageClient(userAgent string) *storage.Service { + storageClientBasePath := c.StorageBasePath + log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) + clientStorage, err := storage.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client storage: %s", err) + return nil + } + clientStorage.UserAgent = userAgent + clientStorage.BasePath = storageClientBasePath + + return clientStorage +} + +// For object uploads, we need to override the specific timeout because they are long, synchronous operations. +func (c *Config) NewStorageClientWithTimeoutOverride(userAgent string, timeout time.Duration) *storage.Service { + storageClientBasePath := c.StorageBasePath + log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) + // Copy the existing HTTP client (which has no unexported fields [as of Oct 2021 at least], so this is safe). + // We have to do this because otherwise we will accidentally change the timeout for all other + // synchronous operations, which would not be desirable. 
+ httpClient := &http.Client{ + Transport: c.Client.Transport, + CheckRedirect: c.Client.CheckRedirect, + Jar: c.Client.Jar, + Timeout: timeout, + } + clientStorage, err := storage.NewService(c.Context, option.WithHTTPClient(httpClient)) + if err != nil { + log.Printf("[WARN] Error creating client storage: %s", err) + return nil + } + clientStorage.UserAgent = userAgent + clientStorage.BasePath = storageClientBasePath + + return clientStorage +} + +func (c *Config) NewSqlAdminClient(userAgent string) *sqladmin.Service { + sqlClientBasePath := RemoveBasePathVersion(RemoveBasePathVersion(c.SQLBasePath)) + log.Printf("[INFO] Instantiating Google SqlAdmin client for path %s", sqlClientBasePath) + clientSqlAdmin, err := sqladmin.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client storage: %s", err) + return nil + } + clientSqlAdmin.UserAgent = userAgent + clientSqlAdmin.BasePath = sqlClientBasePath + + return clientSqlAdmin +} + +func (c *Config) NewPubsubClient(userAgent string) *pubsub.Service { + pubsubClientBasePath := RemoveBasePathVersion(c.PubsubBasePath) + log.Printf("[INFO] Instantiating Google Pubsub client for path %s", pubsubClientBasePath) + wrappedPubsubClient := ClientWithAdditionalRetries(c.Client, PubsubTopicProjectNotReady) + clientPubsub, err := pubsub.NewService(c.Context, option.WithHTTPClient(wrappedPubsubClient)) + if err != nil { + log.Printf("[WARN] Error creating client pubsub: %s", err) + return nil + } + clientPubsub.UserAgent = userAgent + clientPubsub.BasePath = pubsubClientBasePath + + return clientPubsub +} + +func (c *Config) NewDataflowClient(userAgent string) *dataflow.Service { + dataflowClientBasePath := RemoveBasePathVersion(c.DataflowBasePath) + log.Printf("[INFO] Instantiating Google Dataflow client for path %s", dataflowClientBasePath) + clientDataflow, err := dataflow.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error 
creating client dataflow: %s", err) + return nil + } + clientDataflow.UserAgent = userAgent + clientDataflow.BasePath = dataflowClientBasePath + + return clientDataflow +} + +func (c *Config) NewResourceManagerClient(userAgent string) *cloudresourcemanager.Service { + resourceManagerBasePath := RemoveBasePathVersion(c.ResourceManagerBasePath) + log.Printf("[INFO] Instantiating Google Cloud ResourceManager client for path %s", resourceManagerBasePath) + clientResourceManager, err := cloudresourcemanager.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client resource manager: %s", err) + return nil + } + clientResourceManager.UserAgent = userAgent + clientResourceManager.BasePath = resourceManagerBasePath + + return clientResourceManager +} + +func (c *Config) NewResourceManagerV3Client(userAgent string) *resourceManagerV3.Service { + resourceManagerV3BasePath := RemoveBasePathVersion(c.ResourceManagerV3BasePath) + log.Printf("[INFO] Instantiating Google Cloud ResourceManager V3 client for path %s", resourceManagerV3BasePath) + clientResourceManagerV3, err := resourceManagerV3.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client resource manager v3: %s", err) + return nil + } + clientResourceManagerV3.UserAgent = userAgent + clientResourceManagerV3.BasePath = resourceManagerV3BasePath + + return clientResourceManagerV3 +} + +{{ if ne $.TargetVersionName `ga` -}} +func(c *Config) NewRuntimeconfigClient(userAgent string) *runtimeconfig.Service { + runtimeConfigClientBasePath := RemoveBasePathVersion(c.RuntimeConfigBasePath) + log.Printf("[INFO] Instantiating Google Cloud Runtimeconfig client for path %s", runtimeConfigClientBasePath) + clientRuntimeconfig, err := runtimeconfig.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client runtime config: %s", err) + return nil + } + 
clientRuntimeconfig.UserAgent = userAgent + clientRuntimeconfig.BasePath = runtimeConfigClientBasePath + + return clientRuntimeconfig +} +{{- end }} + +func (c *Config) NewIamClient(userAgent string) *iam.Service { + iamClientBasePath := RemoveBasePathVersion(c.IAMBasePath) + log.Printf("[INFO] Instantiating Google Cloud IAM client for path %s", iamClientBasePath) + clientIAM, err := iam.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client iam: %s", err) + return nil + } + clientIAM.UserAgent = userAgent + clientIAM.BasePath = iamClientBasePath + + return clientIAM +} + +func (c *Config) NewIamCredentialsClient(userAgent string) *iamcredentials.Service { + iamCredentialsClientBasePath := RemoveBasePathVersion(c.IamCredentialsBasePath) + log.Printf("[INFO] Instantiating Google Cloud IAMCredentials client for path %s", iamCredentialsClientBasePath) + clientIamCredentials, err := iamcredentials.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client iam credentials: %s", err) + return nil + } + clientIamCredentials.UserAgent = userAgent + clientIamCredentials.BasePath = iamCredentialsClientBasePath + + return clientIamCredentials +} + +func (c *Config) NewServiceManClient(userAgent string) *servicemanagement.APIService { + serviceManagementClientBasePath := RemoveBasePathVersion(c.ServiceManagementBasePath) + log.Printf("[INFO] Instantiating Google Cloud Service Management client for path %s", serviceManagementClientBasePath) + clientServiceMan, err := servicemanagement.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client service management: %s", err) + return nil + } + clientServiceMan.UserAgent = userAgent + clientServiceMan.BasePath = serviceManagementClientBasePath + + return clientServiceMan +} + +func (c *Config) NewServiceUsageClient(userAgent string) *serviceusage.Service { + 
serviceUsageClientBasePath := RemoveBasePathVersion(c.ServiceUsageBasePath) + log.Printf("[INFO] Instantiating Google Cloud Service Usage client for path %s", serviceUsageClientBasePath) + clientServiceUsage, err := serviceusage.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client service usage: %s", err) + return nil + } + clientServiceUsage.UserAgent = userAgent + clientServiceUsage.BasePath = serviceUsageClientBasePath + + return clientServiceUsage +} + +func (c *Config) NewBillingClient(userAgent string) *cloudbilling.APIService { + cloudBillingClientBasePath := RemoveBasePathVersion(c.CloudBillingBasePath) + log.Printf("[INFO] Instantiating Google Cloud Billing client for path %s", cloudBillingClientBasePath) + clientBilling, err := cloudbilling.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client billing: %s", err) + return nil + } + clientBilling.UserAgent = userAgent + clientBilling.BasePath = cloudBillingClientBasePath + + return clientBilling +} + +func (c *Config) NewBuildClient(userAgent string) *cloudbuild.Service { + cloudBuildClientBasePath := RemoveBasePathVersion(c.CloudBuildBasePath) + log.Printf("[INFO] Instantiating Google Cloud Build client for path %s", cloudBuildClientBasePath) + clientBuild, err := cloudbuild.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client build: %s", err) + return nil + } + clientBuild.UserAgent = userAgent + clientBuild.BasePath = cloudBuildClientBasePath + + return clientBuild +} + +func (c *Config) NewCloudFunctionsClient(userAgent string) *cloudfunctions.Service { + cloudFunctionsClientBasePath := RemoveBasePathVersion(c.CloudFunctionsBasePath) + log.Printf("[INFO] Instantiating Google Cloud CloudFunctions Client for path %s", cloudFunctionsClientBasePath) + clientCloudFunctions, err := cloudfunctions.NewService(c.Context, 
option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client cloud functions: %s", err) + return nil + } + clientCloudFunctions.UserAgent = userAgent + clientCloudFunctions.BasePath = cloudFunctionsClientBasePath + + return clientCloudFunctions +} + +func (c *Config) NewSourceRepoClient(userAgent string) *sourcerepo.Service { + sourceRepoClientBasePath := RemoveBasePathVersion(c.SourceRepoBasePath) + log.Printf("[INFO] Instantiating Google Cloud Source Repo client for path %s", sourceRepoClientBasePath) + clientSourceRepo, err := sourcerepo.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client source repo: %s", err) + return nil + } + clientSourceRepo.UserAgent = userAgent + clientSourceRepo.BasePath = sourceRepoClientBasePath + + return clientSourceRepo +} + +func (c *Config) NewBigQueryClient(userAgent string) *bigquery.Service { + bigQueryClientBasePath := c.BigQueryBasePath + log.Printf("[INFO] Instantiating Google Cloud BigQuery client for path %s", bigQueryClientBasePath) + wrappedBigQueryClient := ClientWithAdditionalRetries(c.Client, IamMemberMissing) + clientBigQuery, err := bigquery.NewService(c.Context, option.WithHTTPClient(wrappedBigQueryClient)) + if err != nil { + log.Printf("[WARN] Error creating client big query: %s", err) + return nil + } + clientBigQuery.UserAgent = userAgent + clientBigQuery.BasePath = bigQueryClientBasePath + + return clientBigQuery +} + +func (c *Config) NewSpannerClient(userAgent string) *spanner.Service { + spannerClientBasePath := RemoveBasePathVersion(c.SpannerBasePath) + log.Printf("[INFO] Instantiating Google Cloud Spanner client for path %s", spannerClientBasePath) + clientSpanner, err := spanner.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client source repo: %s", err) + return nil + } + clientSpanner.UserAgent = userAgent + clientSpanner.BasePath = 
spannerClientBasePath + + return clientSpanner +} + +func (c *Config) NewDataprocClient(userAgent string) *dataproc.Service { + dataprocClientBasePath := RemoveBasePathVersion(c.DataprocBasePath) + log.Printf("[INFO] Instantiating Google Cloud Dataproc client for path %s", dataprocClientBasePath) + clientDataproc, err := dataproc.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client dataproc: %s", err) + return nil + } + clientDataproc.UserAgent = userAgent + clientDataproc.BasePath = dataprocClientBasePath + + return clientDataproc +} + +func (c *Config) NewCloudIoTClient(userAgent string) *cloudiot.Service { + cloudIoTClientBasePath := RemoveBasePathVersion(c.CloudIoTBasePath) + log.Printf("[INFO] Instantiating Google Cloud IoT Core client for path %s", cloudIoTClientBasePath) + clientCloudIoT, err := cloudiot.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client cloud iot: %s", err) + return nil + } + clientCloudIoT.UserAgent = userAgent + clientCloudIoT.BasePath = cloudIoTClientBasePath + + return clientCloudIoT +} + +func (c *Config) NewAppEngineClient(userAgent string) *appengine.APIService { + appEngineClientBasePath := RemoveBasePathVersion(c.AppEngineBasePath) + log.Printf("[INFO] Instantiating App Engine client for path %s", appEngineClientBasePath) + clientAppEngine, err := appengine.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client appengine: %s", err) + return nil + } + clientAppEngine.UserAgent = userAgent + clientAppEngine.BasePath = appEngineClientBasePath + + return clientAppEngine +} + +func (c *Config) NewComposerClient(userAgent string) *composer.Service { + composerClientBasePath := RemoveBasePathVersion(c.ComposerBasePath) + log.Printf("[INFO] Instantiating Cloud Composer client for path %s", composerClientBasePath) + clientComposer, err := 
composer.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client composer: %s", err) + return nil + } + clientComposer.UserAgent = userAgent + clientComposer.BasePath = composerClientBasePath + + return clientComposer +} + +func (c *Config) NewServiceNetworkingClient(userAgent string) *servicenetworking.APIService { + serviceNetworkingClientBasePath := RemoveBasePathVersion(c.ServiceNetworkingBasePath) + log.Printf("[INFO] Instantiating Service Networking client for path %s", serviceNetworkingClientBasePath) + clientServiceNetworking, err := servicenetworking.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client service networking: %s", err) + return nil + } + clientServiceNetworking.UserAgent = userAgent + clientServiceNetworking.BasePath = serviceNetworkingClientBasePath + + return clientServiceNetworking +} + +func (c *Config) NewStorageTransferClient(userAgent string) *storagetransfer.Service { + storageTransferClientBasePath := RemoveBasePathVersion(c.StorageTransferBasePath) + log.Printf("[INFO] Instantiating Google Cloud Storage Transfer client for path %s", storageTransferClientBasePath) + clientStorageTransfer, err := storagetransfer.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client storage transfer: %s", err) + return nil + } + clientStorageTransfer.UserAgent = userAgent + clientStorageTransfer.BasePath = storageTransferClientBasePath + + return clientStorageTransfer +} + +func (c *Config) NewHealthcareClient(userAgent string) *healthcare.Service { + healthcareClientBasePath := RemoveBasePathVersion(c.HealthcareBasePath) + log.Printf("[INFO] Instantiating Google Cloud Healthcare client for path %s", healthcareClientBasePath) + clientHealthcare, err := healthcare.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating 
client healthcare: %s", err) + return nil + } + clientHealthcare.UserAgent = userAgent + clientHealthcare.BasePath = healthcareClientBasePath + + return clientHealthcare +} + +func (c *Config) NewCloudIdentityClient(userAgent string) *cloudidentity.Service { + cloudidentityClientBasePath := RemoveBasePathVersion(c.CloudIdentityBasePath) + log.Printf("[INFO] Instantiating Google Cloud CloudIdentity client for path %s", cloudidentityClientBasePath) + clientCloudIdentity, err := cloudidentity.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client cloud identity: %s", err) + return nil + } + clientCloudIdentity.UserAgent = userAgent + clientCloudIdentity.BasePath = cloudidentityClientBasePath + + return clientCloudIdentity +} + +func (c *Config) BigTableClientFactory(userAgent string) *BigtableClientFactory { + bigtableClientFactory := &BigtableClientFactory{ + UserAgent: userAgent, + TokenSource: c.tokenSource, + gRPCLoggingOptions: c.gRPCLoggingOptions, + BillingProject: c.BillingProject, + UserProjectOverride: c.UserProjectOverride, + } + + return bigtableClientFactory +} + +// Unlike other clients, the Bigtable Admin client doesn't use a single +// service. Instead, there are several distinct services created off +// the base service object. To imitate most other handwritten clients, +// we expose those directly instead of providing the `Service` object +// as a factory. 
+func (c *Config) NewBigTableProjectsInstancesClient(userAgent string) *bigtableadmin.ProjectsInstancesService { + bigtableAdminBasePath := RemoveBasePathVersion(c.BigtableAdminBasePath) + log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) + clientBigtable, err := bigtableadmin.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client big table projects instances: %s", err) + return nil + } + clientBigtable.UserAgent = userAgent + clientBigtable.BasePath = bigtableAdminBasePath + clientBigtableProjectsInstances := bigtableadmin.NewProjectsInstancesService(clientBigtable) + + return clientBigtableProjectsInstances +} + +func (c *Config) NewBigTableProjectsInstancesTablesClient(userAgent string) *bigtableadmin.ProjectsInstancesTablesService { + bigtableAdminBasePath := RemoveBasePathVersion(c.BigtableAdminBasePath) + log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) + clientBigtable, err := bigtableadmin.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client projects instances tables: %s", err) + return nil + } + clientBigtable.UserAgent = userAgent + clientBigtable.BasePath = bigtableAdminBasePath + clientBigtableProjectsInstancesTables := bigtableadmin.NewProjectsInstancesTablesService(clientBigtable) + + return clientBigtableProjectsInstancesTables +} + +func (c *Config) NewCloudRunV2Client(userAgent string) *runadminv2.Service { + runAdminV2ClientBasePath := RemoveBasePathVersion(RemoveBasePathVersion(c.CloudRunV2BasePath)) + log.Printf("[INFO] Instantiating Google Cloud Run Admin v2 client for path %s", runAdminV2ClientBasePath) + clientRunAdminV2, err := runadminv2.NewService(c.Context, option.WithHTTPClient(c.Client)) + if err != nil { + log.Printf("[WARN] Error creating client run admin: %s", err) + return nil + } + clientRunAdminV2.UserAgent = 
userAgent + clientRunAdminV2.BasePath = runAdminV2ClientBasePath + + return clientRunAdminV2 +} + +// StaticTokenSource is used to be able to identify static token sources without reflection. +type StaticTokenSource struct { + oauth2.TokenSource +} + +// Get a set of credentials with a given scope (clientScopes) based on the Config object. +// If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds +// instead. +func (c *Config) GetCredentials(clientScopes []string, initialCredentialsOnly bool) (googleoauth.Credentials, error) { + // UniverseDomain is assumed to be the previously set provider-configured value for access tokens + if c.AccessToken != "" { + contents, _, err := verify.PathOrContents(c.AccessToken) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("Error loading access token: %s", err) + } + + token := &oauth2.Token{AccessToken: contents} + if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithTokenSource(oauth2.StaticTokenSource(token)), option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) 
+ if err != nil { + return googleoauth.Credentials{}, err + } + return *creds, nil + } + + log.Printf("[INFO] Authenticating using configured Google JSON 'access_token'...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) + return googleoauth.Credentials{ + TokenSource: StaticTokenSource{oauth2.StaticTokenSource(token)}, + }, nil + } + + // UniverseDomain is set by the credential file's "universe_domain" field + if c.Credentials != "" { + contents, _, err := verify.PathOrContents(c.Credentials) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("error loading credentials: %s", err) + } + + var content map[string]any + if err := json.Unmarshal([]byte(contents), &content); err != nil { + return googleoauth.Credentials{}, fmt.Errorf("error unmarshaling credentials: %s", err) + } + + if content["universe_domain"] != nil { + c.UniverseDomain = content["universe_domain"].(string) + } else { + // Unset UniverseDomain if not found in credentials file + c.UniverseDomain = "" + } + + if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { + opts := []option.ClientOption{option.WithCredentialsJSON([]byte(contents)), option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), option.WithScopes(clientScopes...)} + creds, err := transport.Creds(context.TODO(), opts...) 
+ if err != nil { + return googleoauth.Credentials{}, err + } + return *creds, nil + } + + if c.UniverseDomain != "" && c.UniverseDomain != "googleapis.com" { + creds, err := transport.Creds(c.Context, option.WithCredentialsJSON([]byte(contents)), option.WithScopes(clientScopes...), internaloption.EnableJwtWithScope()) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("unable to parse credentials from '%s': %s", contents, err) + } + log.Printf("[INFO] Authenticating using configured Google JSON 'credentials'...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) + log.Printf("[INFO] -- Sending EnableJwtWithScope option") + return *creds, nil + } else { + creds, err := transport.Creds(c.Context, option.WithCredentialsJSON([]byte(contents)), option.WithScopes(clientScopes...)) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("unable to parse credentials from '%s': %s", contents, err) + } + log.Printf("[INFO] Authenticating using configured Google JSON 'credentials'...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) + return *creds, nil + } + } + + var creds *googleoauth.Credentials + var err error + if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { + opts := option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...) 
+ creds, err = transport.Creds(context.TODO(), opts, option.WithScopes(clientScopes...)) + if err != nil { + return googleoauth.Credentials{}, err + } + } else { + log.Printf("[INFO] Authenticating using DefaultClient...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) + + if c.UniverseDomain != "" && c.UniverseDomain != "googleapis.com" { + log.Printf("[INFO] -- Sending JwtWithScope option") + creds, err = transport.Creds(context.Background(), option.WithScopes(clientScopes...), internaloption.EnableJwtWithScope()) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. Original error: %w", err) + } + } else { + creds, err = transport.Creds(context.Background(), option.WithScopes(clientScopes...)) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. 
Original error: %w", err) + } + } + } + + if creds.JSON != nil { + var content map[string]any + if err := json.Unmarshal([]byte(creds.JSON), &content); err != nil { + log.Printf("[WARN] error unmarshaling credentials, skipping Universe Domain detection") + c.UniverseDomain = "" + } else if content["universe_domain"] != nil { + c.UniverseDomain = content["universe_domain"].(string) + } else { + // Unset UniverseDomain if not found in ADC credentials file + c.UniverseDomain = "" + } + } else { + // creds.GetUniverseDomain may retrieve a domain from the metadata server + ud, err := creds.GetUniverseDomain() + if err != nil { + log.Printf("[WARN] Error retrieving universe domain: %s", err) + } + c.UniverseDomain = ud + } + + return *creds, nil +} + +// Remove the `/{{"{{"}}version{{"}}"}}/` from a base path if present. +func RemoveBasePathVersion(url string) string { + re := regexp.MustCompile(`(?Phttp[s]://.*)(?P/[^/]+?/$)`) + return re.ReplaceAllString(url, "$1/") +} + +// For a consumer of config.go that isn't a full fledged provider and doesn't +// have its own endpoint mechanism such as sweepers, init {{"{{"}}service{{"}}"}}BasePath +// values to a default. After using this, you should call config.LoadAndValidate. 
+func ConfigureBasePaths(c *Config) { + // Generated Products + {{- range $product := $.Products }} + c.{{ $product.Name }}BasePath = DefaultBasePaths[{{ $product.Name }}BasePathKey] + {{- end }} + + // Handwritten Products / Versioned / Atypical Entries + c.CloudBillingBasePath = DefaultBasePaths[CloudBillingBasePathKey] + c.ComposerBasePath = DefaultBasePaths[ComposerBasePathKey] + c.ContainerBasePath = DefaultBasePaths[ContainerBasePathKey] + c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] + c.DataflowBasePath = DefaultBasePaths[DataflowBasePathKey] + c.IamCredentialsBasePath = DefaultBasePaths[IamCredentialsBasePathKey] + c.ResourceManagerV3BasePath = DefaultBasePaths[ResourceManagerV3BasePathKey] + c.IAMBasePath = DefaultBasePaths[IAMBasePathKey] + c.BigQueryBasePath = DefaultBasePaths[BigQueryBasePathKey] + c.BigtableAdminBasePath = DefaultBasePaths[BigtableAdminBasePathKey] + c.TagsLocationBasePath = DefaultBasePaths[TagsLocationBasePathKey] +} + +func GetCurrentUserEmail(config *Config, userAgent string) (string, error) { + // When environment variables UserProjectOverride and BillingProject are set for the provider, + // the header X-Goog-User-Project is set for the API requests. + // But it causes an error when calling GetCurrentUserEmail. Set the project to be "NO_BILLING_PROJECT_OVERRIDE". + // And then it triggers the header X-Goog-User-Project to be set to empty string. + + // See https://github.com/golang/oauth2/issues/306 for a recommendation to do this from a Go maintainer + // URL retrieved from https://accounts.google.com/.well-known/openid-configuration + res, err := SendRequest(SendRequestOptions{ + Config: config, + Method: "GET", + Project: "NO_BILLING_PROJECT_OVERRIDE", + RawURL: "https://openidconnect.googleapis.com/v1/userinfo", + UserAgent: userAgent, + }) + + if err != nil { + return "", fmt.Errorf("error retrieving userinfo for your provider credentials. 
have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) + } + if res["email"] == nil { + return "", fmt.Errorf("error retrieving email from userinfo. email was nil in the response.") + } + return res["email"].(string), nil +} + +func MultiEnvSearch(ks []string) string { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return "" +} + +// MultiEnvDefault is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefault(ks []string, dv interface{}) interface{} { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return dv +} + +func CustomEndpointValidator() validator.String { + return stringvalidator.RegexMatches(regexp.MustCompile(`.*/[^/]+/$`), "") +} + +// return the region a selfLink is referring to +func GetRegionFromRegionSelfLink(selfLink string) string { + re := regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/[a-zA-Z0-9-]*/regions/([a-zA-Z0-9-]*)") + switch { + case re.MatchString(selfLink): + if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { + return res[1] + } + } + return selfLink +} From 4f35c8aaa99063b05ef948d62f129f334e15f049 Mon Sep 17 00:00:00 2001 From: abd-goog <156919569+abd-goog@users.noreply.github.com> Date: Thu, 5 Sep 2024 21:30:49 +0530 Subject: [PATCH 39/60] Add `tags` field to Folder resource (#11424) --- .../resourcemanager/resource_google_folder.go | 20 ++++- .../resource_google_folder_test.go | 73 +++++++++++++++++++ .../docs/r/google_folder.html.markdown | 11 +++ 3 files changed, 100 insertions(+), 4 deletions(-) diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder.go b/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder.go index 653da5651bc9..78f68ae7506d 100644 --- 
a/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder.go +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder.go @@ -71,6 +71,13 @@ func ResourceGoogleFolder() *schema.Resource { Default: true, Description: `When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.`, }, + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty.`, + }, }, UseJSONNumber: true, } @@ -86,14 +93,19 @@ func resourceGoogleFolderCreate(d *schema.ResourceData, meta interface{}) error displayName := d.Get("display_name").(string) parent := d.Get("parent").(string) + folder := &resourceManagerV3.Folder{ + DisplayName: displayName, + Parent: parent, + } + if _, ok := d.GetOk("tags"); ok { + folder.Tags = tpgresource.ExpandStringMap(d, "tags") + } + var op *resourceManagerV3.Operation err = transport_tpg.Retry(transport_tpg.RetryOptions{ RetryFunc: func() error { var reqErr error - op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Create(&resourceManagerV3.Folder{ - DisplayName: displayName, - Parent: parent, - }).Do() + op, reqErr = config.NewResourceManagerV3Client(userAgent).Folders.Create(folder).Do() return reqErr }, Timeout: d.Timeout(schema.TimeoutCreate), diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder_test.go b/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder_test.go index 75f1a08ca995..b90e63614f6f 100644 --- 
a/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_google_folder_test.go @@ -2,6 +2,7 @@ package resourcemanager_test import ( "fmt" + "regexp" "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -88,6 +89,45 @@ func TestAccFolder_moveParent(t *testing.T) { }) } +// Test that a Folder resource can be created with tags +func TestAccFolder_tags(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + parent := "organizations/" + org + folderDisplayName := "tf-test-" + acctest.RandString(t, 10) + tagKey := acctest.BootstrapSharedTestTagKey(t, "crm-folder-tagkey") + tagValue := acctest.BootstrapSharedTestTagValue(t, "crm-folder-tagvalue", tagKey) + folder_tags := resourceManagerV3.Folder{} + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccFolder_tags(folderDisplayName, parent, map[string]string{org + "/" + tagKey: tagValue}), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleFolderExists(t, "google_folder.folder_tags", &folder_tags), + ), + }, + // Make sure import supports tags + { + ResourceName: "google_folder.folder_tags", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"tags", "deletion_protection"}, // we don't read tags back + }, + // Update tags tries to replace the folder but fails due to deletion protection + { + Config: testAccFolder_tags(folderDisplayName, org, map[string]string{}), + ExpectError: regexp.MustCompile("deletion_protection"), + }, + { + Config: testAccFolder_tagsAllowDestroy(folderDisplayName, parent, map[string]string{org + "/" + tagKey: tagValue}), + }, + }, + }) +} + func testAccCheckGoogleFolderDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config 
:= acctest.GoogleProviderConfig(t) @@ -159,6 +199,39 @@ resource "google_folder" "folder1" { `, folder, parent) } +func testAccFolder_tags(folder, parent string, tags map[string]string) string { + r := fmt.Sprintf(` +resource "google_folder" "folder_tags" { + display_name = "%s" + parent = "%s" + tags = {`, folder, parent) + + l := "" + for key, value := range tags { + l += fmt.Sprintf("%q = %q\n", key, value) + } + + l += fmt.Sprintf("}\n}") + return r + l +} + +func testAccFolder_tagsAllowDestroy(folder, parent string, tags map[string]string) string { + r := fmt.Sprintf(` +resource "google_folder" "folder_tags" { + display_name = "%s" + parent = "%s" + deletion_protection = false + tags = {`, folder, parent) + + l := "" + for key, value := range tags { + l += fmt.Sprintf("%q = %q\n", key, value) + } + + l += fmt.Sprintf("}\n}") + return r + l +} + func testAccFolder_move(folder1, folder2, parent string) string { return fmt.Sprintf(` resource "google_folder" "folder1" { diff --git a/mmv1/third_party/terraform/website/docs/r/google_folder.html.markdown b/mmv1/third_party/terraform/website/docs/r/google_folder.html.markdown index e0f47767a350..8d977035ea8b 100644 --- a/mmv1/third_party/terraform/website/docs/r/google_folder.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/google_folder.html.markdown @@ -20,6 +20,8 @@ resource must have `roles/resourcemanager.folderCreator`. See the [Access Control for Folders Using IAM](https://cloud.google.com/resource-manager/docs/access-control-folders) doc for more information. +~> It may take a while for the attached tag bindings to be deleted after the folder is scheduled to be deleted. 
+ ## Example Usage ```hcl @@ -34,6 +36,13 @@ resource "google_folder" "team-abc" { display_name = "Team ABC" parent = google_folder.department1.name } + +# Folder with a tag +resource "google_folder" "department1" { + display_name = "Department 1" + parent = "organizations/1234567" + tags = {"1234567/env":"staging"} +} ``` ## Argument Reference @@ -46,6 +55,8 @@ The following arguments are supported: * `parent` - (Required) The resource name of the parent Folder or Organization. Must be of the form `folders/{folder_id}` or `organizations/{org_id}`. +* `tags` - (Optional) A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are From bf76ec34af49725bfb617eaf3b27c1cbd36a1948 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Thu, 5 Sep 2024 11:18:41 -0500 Subject: [PATCH 40/60] rewrite - fix GA diffs + small refresh (#11642) --- mmv1/api/resource.go | 11 ++- mmv1/api/type.go | 2 +- .../certificatemanager/go_Certificate.yaml | 7 ++ mmv1/products/cloudrun/go_Service.yaml | 8 +- mmv1/products/cloudrunv2/go_Job.yaml | 18 +++- mmv1/products/cloudrunv2/go_Service.yaml | 22 ++++- .../go_WorkloadIdentityPoolProvider.yaml | 63 +++++++++++++ .../securitycenter/go_NotificationConfig.yaml | 3 + .../go_ProjectNotificationConfig.yaml | 3 + .../go_OrganizationNotificationConfig.yaml | 3 + .../go_ProjectNotificationConfig.yaml | 3 + mmv1/provider/terraform.go | 7 +- mmv1/template-converter.go | 4 +- ..._identity_pool_provider_x509_basic.tf.tmpl | 18 ++++ ...d_identity_pool_provider_x509_full.tf.tmpl | 24 +++++ .../terraform/flatten_property_method.go.tmpl | 8 +- .../terraform/schema_property.go.tmpl | 8 +- 
.../terraform/schema_subresource.go.tmpl | 2 +- .../fwtransport/go/framework_config.go.tmpl | 11 +++ .../go/provider_mmv1_resources.go.tmpl | 1 - .../provider/go/provider_test.go.tmpl | 69 -------------- .../go/data_source_google_compute_instance.go | 2 +- ...rce_compute_instance_template_test.go.tmpl | 3 + ...ompute_region_backend_service_test.go.tmpl | 4 +- .../services/container/go/node_config.go.tmpl | 4 +- .../resource_container_cluster_test.go.tmpl | 6 +- .../resource_container_node_pool_test.go.tmpl | 8 +- .../go/resource_dns_managed_zone_test.go.tmpl | 4 +- ...rkload_identity_pool_provider_test.go.tmpl | 92 +++++++++++++++++++ 29 files changed, 307 insertions(+), 111 deletions(-) create mode 100644 mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_full.tf.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index cb1ab7daa791..5406d75b33fb 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -349,6 +349,9 @@ func (r *Resource) SetDefault(product *Product) { for _, property := range r.AllProperties() { property.SetDefault(r) } + if r.IamPolicy != nil && r.IamPolicy.MinVersion == "" { + r.IamPolicy.MinVersion = r.MinVersion + } } func (r *Resource) Validate() { @@ -430,6 +433,12 @@ func (r Resource) AllProperties() []*Type { return google.Concat(r.Properties, r.Parameters) } +func (r Resource) AllPropertiesInVersion() []*Type { + return google.Reject(google.Concat(r.Properties, r.Parameters), func(p *Type) bool { + return p.Exclude + }) +} + // def properties_with_excluded func (r Resource) PropertiesWithExcluded() []*Type { return r.Properties @@ -960,7 +969,7 @@ func (r Resource) Updatable() bool { if !r.Immutable { return true } - for _, p := range r.AllProperties() { + for _, p := range r.AllPropertiesInVersion() { if p.UpdateUrl != "" { return true } diff --git a/mmv1/api/type.go b/mmv1/api/type.go 
index df6cbfb18bf7..0558be2cbb74 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -1422,7 +1422,7 @@ func (t *Type) GetPropertySchemaPath(schemaPath string) string { } if index == -1 { - continue + return "" } prop := nestedProps[index] diff --git a/mmv1/products/certificatemanager/go_Certificate.yaml b/mmv1/products/certificatemanager/go_Certificate.yaml index 8450def41b28..1dab11d18490 100644 --- a/mmv1/products/certificatemanager/go_Certificate.yaml +++ b/mmv1/products/certificatemanager/go_Certificate.yaml @@ -133,6 +133,13 @@ properties: immutable: true diff_suppress_func: 'certManagerDefaultScopeDiffSuppress' default_value: "DEFAULT" + - name: 'sanDnsnames' + type: Array + description: | + The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + output: true + item_type: + type: String - name: 'selfManaged' type: NestedObject description: | diff --git a/mmv1/products/cloudrun/go_Service.yaml b/mmv1/products/cloudrun/go_Service.yaml index 4f36bbd0580d..a6f6495bc60c 100644 --- a/mmv1/products/cloudrun/go_Service.yaml +++ b/mmv1/products/cloudrun/go_Service.yaml @@ -846,15 +846,13 @@ properties: type: NestedObject description: |- A filesystem specified by the Container Storage Interface (CSI). - min_version: 'beta' properties: - name: 'driver' type: String description: |- Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" required: true - name: 'readOnly' type: Boolean @@ -871,9 +869,7 @@ properties: type: NestedObject description: |- A filesystem backed by a Network File System share. 
This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". - min_version: 'beta' + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" properties: - name: 'server' type: String diff --git a/mmv1/products/cloudrunv2/go_Job.yaml b/mmv1/products/cloudrunv2/go_Job.yaml index 77160e111161..398ff9c94bb1 100644 --- a/mmv1/products/cloudrunv2/go_Job.yaml +++ b/mmv1/products/cloudrunv2/go_Job.yaml @@ -522,8 +522,13 @@ properties: - name: 'gcs' type: NestedObject description: |- - Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. - min_version: 'beta' + Cloud Storage bucket mounted as a volume using GCSFuse. + # exactly_one_of: + # - template.0.volumes.0.secret + # - template.0.volumes.0.cloudSqlInstance + # - template.0.volumes.0.emptyDir + # - template.0.volumes.0.gcs + # - template.0.volumes.0.nfs properties: - name: 'bucket' type: String @@ -537,8 +542,13 @@ properties: - name: 'nfs' type: NestedObject description: |- - NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. - min_version: 'beta' + NFS share mounted as a volume. 
+ # exactly_one_of: + # - template.0.volumes.0.secret + # - template.0.volumes.0.cloudSqlInstance + # - template.0.volumes.0.emptyDir + # - template.0.volumes.0.gcs + # - template.0.volumes.0.nfs properties: - name: 'server' type: String diff --git a/mmv1/products/cloudrunv2/go_Service.yaml b/mmv1/products/cloudrunv2/go_Service.yaml index c33947cb0a89..ee98c8fe647b 100644 --- a/mmv1/products/cloudrunv2/go_Service.yaml +++ b/mmv1/products/cloudrunv2/go_Service.yaml @@ -139,6 +139,16 @@ examples: ignore_read_extra: - 'deletion_protection' skip_vcr: true + - name: 'cloudrunv2_service_mesh' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service-%s", context["random_suffix"])' + min_version: 'beta' + vars: + cloud_run_service_name: 'cloudrun-service' + mesh_name: 'network-services-mesh' + ignore_read_extra: + - 'deletion_protection' + external_providers: ["time"] virtual_fields: - name: 'deletion_protection' description: | @@ -813,7 +823,7 @@ properties: - name: 'gcs' type: NestedObject description: |- - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. # exactly_one_of: # - template.0.volumes.0.secret # - template.0.volumes.0.cloudSqlInstance @@ -865,6 +875,16 @@ properties: type: Boolean description: |- Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity + - name: 'serviceMesh' + type: NestedObject + description: |- + Enables Cloud Service Mesh for this Revision. + min_version: 'beta' + properties: + - name: 'mesh' + type: String + description: |- + The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. 
- name: 'traffic' type: Array description: |- diff --git a/mmv1/products/iambeta/go_WorkloadIdentityPoolProvider.yaml b/mmv1/products/iambeta/go_WorkloadIdentityPoolProvider.yaml index 7bc6308f2e31..cfa05f7ec071 100644 --- a/mmv1/products/iambeta/go_WorkloadIdentityPoolProvider.yaml +++ b/mmv1/products/iambeta/go_WorkloadIdentityPoolProvider.yaml @@ -85,6 +85,16 @@ examples: vars: workload_identity_pool_id: 'example-pool' workload_identity_pool_provider_id: 'example-prvdr' + - name: 'iam_workload_identity_pool_provider_x509_basic' + primary_resource_id: 'example' + vars: + workload_identity_pool_id: 'example-pool' + workload_identity_pool_provider_id: 'example-prvdr' + - name: 'iam_workload_identity_pool_provider_x509_full' + primary_resource_id: 'example' + vars: + workload_identity_pool_id: 'example-pool' + workload_identity_pool_provider_id: 'example-prvdr' parameters: properties: - name: 'workloadIdentityPoolId' @@ -233,6 +243,7 @@ properties: - 'aws' - 'oidc' - 'saml' + - 'x509' properties: - name: 'accountId' type: String @@ -251,6 +262,7 @@ properties: - 'aws' - 'oidc' - 'saml' + - 'x509' properties: - name: 'allowedAudiences' type: Array @@ -309,8 +321,59 @@ properties: - 'aws' - 'oidc' - 'saml' + - 'x509' properties: - name: 'idpMetadataXml' type: String description: SAML Identity provider configuration metadata xml doc. required: true + - name: 'x509' + type: NestedObject + description: | + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + exactly_one_of: + - 'aws' + - 'oidc' + - 'saml' + - 'x509' + properties: + - name: 'trustStore' + type: NestedObject + description: | + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. 
Only 1 trust store is currently + supported. + required: true + properties: + - name: 'trustAnchors' + type: Array + description: | + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + required: true + item_type: + type: NestedObject + properties: + - name: 'pemCertificate' + type: String + description: | + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + - name: 'intermediateCas' + type: Array + description: | + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + item_type: + type: NestedObject + properties: + - name: 'pemCertificate' + type: String + description: | + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). diff --git a/mmv1/products/securitycenter/go_NotificationConfig.yaml b/mmv1/products/securitycenter/go_NotificationConfig.yaml index 907ce77b9c17..4171078113bc 100644 --- a/mmv1/products/securitycenter/go_NotificationConfig.yaml +++ b/mmv1/products/securitycenter/go_NotificationConfig.yaml @@ -93,6 +93,8 @@ properties: description: | The config for triggering streaming-based notifications. required: true + send_empty_value: true + allow_empty_object: true update_mask_fields: - 'streamingConfig.filter' properties: @@ -125,3 +127,4 @@ properties: [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) for information on how to write a filter. 
required: true + send_empty_value: true diff --git a/mmv1/products/securitycenter/go_ProjectNotificationConfig.yaml b/mmv1/products/securitycenter/go_ProjectNotificationConfig.yaml index 57879dd3a948..3d4a2dae57f1 100644 --- a/mmv1/products/securitycenter/go_ProjectNotificationConfig.yaml +++ b/mmv1/products/securitycenter/go_ProjectNotificationConfig.yaml @@ -87,6 +87,8 @@ properties: description: | The config for triggering streaming-based notifications. required: true + send_empty_value: true + allow_empty_object: true update_mask_fields: - 'streamingConfig.filter' properties: @@ -119,3 +121,4 @@ properties: [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) for information on how to write a filter. required: true + send_empty_value: true diff --git a/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml index 9ba89f6b3fd8..1e4aeae4dffb 100644 --- a/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml +++ b/mmv1/products/securitycenterv2/go_OrganizationNotificationConfig.yaml @@ -100,6 +100,8 @@ properties: description: | The config for triggering streaming-based notifications. required: true + send_empty_value: true + allow_empty_object: true update_mask_fields: - 'streamingConfig.filter' properties: @@ -132,3 +134,4 @@ properties: [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) for information on how to write a filter. 
required: true + send_empty_value: true diff --git a/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml b/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml index 62f92ffa8f3c..bb14bb308707 100644 --- a/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml +++ b/mmv1/products/securitycenterv2/go_ProjectNotificationConfig.yaml @@ -97,6 +97,8 @@ properties: description: | The config for triggering streaming-based notifications. required: true + send_empty_value: true + allow_empty_object: true update_mask_fields: - 'streamingConfig.filter' properties: @@ -129,3 +131,4 @@ properties: [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) for information on how to write a filter. required: true + send_empty_value: true diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 860aeee9bd9c..427f151f2bef 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -25,6 +25,7 @@ import ( "path" "path/filepath" "reflect" + "slices" "strings" "time" @@ -198,7 +199,7 @@ func (t *Terraform) GenerateOperation(outputFolder string) { // IAM policies separately from the resource itself // def generate_iam_policy(pwd, data, generate_code, generate_docs) func (t *Terraform) GenerateIamPolicy(object api.Resource, templateData TemplateData, outputFolder string, generateCode, generateDocs bool) { - if generateCode && object.IamPolicy != nil && (object.IamPolicy.MinVersion == "" || object.IamPolicy.MinVersion >= t.TargetVersionName) { + if generateCode && object.IamPolicy != nil && (object.IamPolicy.MinVersion == "" || slices.Index(product.ORDER, object.IamPolicy.MinVersion) <= slices.Index(product.ORDER, t.TargetVersionName)) { productName := t.Product.ApiName targetFolder := path.Join(outputFolder, t.FolderName(), "services", productName) if err := os.MkdirAll(targetFolder, os.ModePerm); err != nil { @@ -993,10 +994,10 @@ func (t Terraform) 
SupportedProviderVersions() []string { if i == 0 { continue } - supported = append(supported, v) - if v == t.TargetVersionName { + if i > slices.Index(product.ORDER, t.TargetVersionName) { break } + supported = append(supported, v) } return supported } diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index a2ea36d1fbf1..79abd4051c18 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -224,14 +224,14 @@ func replace(data []byte) []byte { if err != nil { log.Fatalf("Cannot compile the regular expression: %v", err) } - data = r.ReplaceAll(data, []byte("\n\n$1{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }}")) + data = r.ReplaceAll(data, []byte("\n\n$1{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }}")) // Replace <% unless version.nil? || version == ['|"]ga['|"] -%> r, err = regexp.Compile(`<% unless version\.nil\? \|\| version == ['|"]ga['|"] -%>`) if err != nil { log.Fatalf("Cannot compile the regular expression: %v", err) } - data = r.ReplaceAll(data, []byte(`{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }}`)) + data = r.ReplaceAll(data, []byte(`{{- if not (or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga")) }}`)) // Replace <% if version.nil? || version == ['|"]ga['|"] -%> r, err = regexp.Compile(`<% if version\.nil\? 
\|\| version == ['|"]ga['|"] -%>`) diff --git a/mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_basic.tf.tmpl new file mode 100644 index 000000000000..a1ae8ab0d277 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_basic.tf.tmpl @@ -0,0 +1,18 @@ +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "{{index $.Vars "workload_identity_pool_id"}}" +} + +resource "google_iam_workload_identity_pool_provider" "{{$.PrimaryResourceId}}" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "{{index $.Vars "workload_identity_pool_provider_id"}}" + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" + } + x509 { + trust_store { + trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor.pem") + } + } + } +} diff --git a/mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_full.tf.tmpl b/mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_full.tf.tmpl new file mode 100644 index 000000000000..9b06b491408d --- /dev/null +++ b/mmv1/templates/terraform/examples/go/iam_workload_identity_pool_provider_x509_full.tf.tmpl @@ -0,0 +1,24 @@ +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "{{index $.Vars "workload_identity_pool_id"}}" +} + +resource "google_iam_workload_identity_pool_provider" "{{$.PrimaryResourceId}}" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "{{index $.Vars "workload_identity_pool_provider_id"}}" + display_name = "Name of provider" + description = "X.509 identity pool provider for automated test" + disabled = true + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" 
+ } + x509 { + trust_store { + trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/intermediate_ca.pem") + } + } + } +} diff --git a/mmv1/templates/terraform/flatten_property_method.go.tmpl b/mmv1/templates/terraform/flatten_property_method.go.tmpl index 1f82fc54f02f..28d81ac7b0a0 100644 --- a/mmv1/templates/terraform/flatten_property_method.go.tmpl +++ b/mmv1/templates/terraform/flatten_property_method.go.tmpl @@ -28,11 +28,11 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso if len(original) == 0 { return nil } - {{- else if $.Properties }} + {{- else if $.UserProperties }} original := v.(map[string]interface{}) {{- end }} transformed := make(map[string]interface{}) - {{- range $prop := $.Properties }} + {{- range $prop := $.UserProperties }} {{- if $prop.FlattenObject }} if {{ $prop.ApiName }} := flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}{{$prop.TitlelizeProperty}}(original["{{ $prop.ApiName }}"], d, config); {{ $prop.ApiName }} != nil { obj := {{ $prop.ApiName }}.([]interface{})[0] @@ -72,7 +72,7 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso transformed = append(transformed, map[string]interface{}{ {{- end }} - {{- range $prop := $.ItemType.Properties }} + {{- range $prop := $.ItemType.UserProperties }} {{- if not $prop.IgnoreRead }} "{{ underscore $prop.Name }}": flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}{{$prop.TitlelizeProperty}}(original["{{ $prop.ApiName }}"], d, config), {{- end }} @@ -90,7 +90,7 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso original := raw.(map[string]interface{}) transformed = append(transformed, map[string]interface{}{ "{{ $.KeyName }}": k, - {{- range $prop := $.ValueType.Properties }} + {{- range $prop := $.ValueType.UserProperties }} "{{ underscore $prop.Name }}": 
flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}{{$prop.TitlelizeProperty}}(original["{{ $prop.ApiName }}"], d, config), {{- end }} }) diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 10a70b36781e..d825d792dcf4 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -14,7 +14,7 @@ */}} {{- define "SchemaFields"}} {{- if .FlattenObject -}} - {{- range $prop := .ResourceMetadata.OrderProperties .Properties -}} + {{- range $prop := .ResourceMetadata.OrderProperties .UserProperties -}} {{ template "SchemaFields" $prop }} {{ end -}} {{- else -}} @@ -76,7 +76,7 @@ Default value: {{ .ItemType.DefaultValue -}} {{ end -}} Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- range $prop := .ResourceMetadata.OrderProperties $.Properties }} + {{- range $prop := .ResourceMetadata.OrderProperties $.UserProperties }} {{template "SchemaFields" $prop}} {{- end }} }, @@ -94,7 +94,7 @@ Default value: {{ .ItemType.DefaultValue -}} {{ else -}} Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- range $prop := .ResourceMetadata.OrderProperties $.ItemType.Properties }} + {{- range $prop := .ResourceMetadata.OrderProperties $.ItemType.UserProperties }} {{template "SchemaFields" $prop}} {{- end }} }, @@ -149,7 +149,7 @@ Default value: {{ .ItemType.DefaultValue -}} ForceNew: true, {{ end -}} }, - {{- range $prop := .ResourceMetadata.OrderProperties $.ValueType.Properties }} + {{- range $prop := .ResourceMetadata.OrderProperties $.ValueType.UserProperties }} {{template "SchemaFields" $prop}} {{- end }} }, diff --git a/mmv1/templates/terraform/schema_subresource.go.tmpl b/mmv1/templates/terraform/schema_subresource.go.tmpl index 485f1425ebdb..afcd1fb62d98 100644 --- a/mmv1/templates/terraform/schema_subresource.go.tmpl +++ b/mmv1/templates/terraform/schema_subresource.go.tmpl @@ -18,7 +18,7 @@ func {{ .NamespaceProperty }}Schema() 
*schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- range $prop := $.ResourceMetadata.OrderProperties $.ItemType.Properties }} + {{- range $prop := $.ResourceMetadata.OrderProperties $.ItemType.UserProperties }} {{template "SchemaFields" $prop}} {{- end }} }, diff --git a/mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl b/mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl index d148d7e0300f..7593754f37c1 100644 --- a/mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl +++ b/mmv1/third_party/terraform/fwtransport/go/framework_config.go.tmpl @@ -32,6 +32,11 @@ import ( ) type FrameworkProviderConfig struct { + // Temporary, as we'll replace use of FrameworkProviderConfig with transport_tpg.Config soon + // transport_tpg.Config has a Credentials field, hence this change is needed + Credentials types.String + // End temporary + BillingProject types.String Client *http.Client Context context.Context @@ -96,6 +101,12 @@ func (p *FrameworkProviderConfig) LoadAndValidateFramework(ctx context.Context, p.{{ $product.Name }}BasePath = data.{{ $product.Name }}CustomEndpoint.ValueString() {{- end }} + // Temporary + p.Credentials = data.Credentials + // End temporary + + // Copy values from the ProviderModel struct containing data about the provider configuration (present only when responsing to ConfigureProvider rpc calls) + // to the FrameworkProviderConfig struct that will be passed and available to all resources/data sources p.Context = ctx p.BillingProject = data.BillingProject p.DefaultLabels = data.DefaultLabels diff --git a/mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl b/mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl index c7dc42e4882f..c725501f250c 100644 --- a/mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl +++ b/mmv1/third_party/terraform/provider/go/provider_mmv1_resources.go.tmpl @@ -230,7 +230,6 @@ var 
handwrittenDatasources = map[string]*schema.Resource{ "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), "google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), "google_vmwareengine_vcenter_credentials": vmwareengine.DataSourceVmwareengineVcenterCredentials(), - // ####### END handwritten datasources ########### } diff --git a/mmv1/third_party/terraform/provider/go/provider_test.go.tmpl b/mmv1/third_party/terraform/provider/go/provider_test.go.tmpl index 14d7bae0ae3b..96a6c3665d29 100644 --- a/mmv1/third_party/terraform/provider/go/provider_test.go.tmpl +++ b/mmv1/third_party/terraform/provider/go/provider_test.go.tmpl @@ -180,75 +180,6 @@ func TestAccProviderIndirectUserProjectOverride(t *testing.T) { }) } -func TestAccProviderCredentialsEmptyString(t *testing.T) { - // Test is not parallel because ENVs are set. - // Need to skip VCR as this test downloads providers from the Terraform Registry - acctest.SkipIfVcr(t) - - creds := envvar.GetTestCredsFromEnv() - project := envvar.GetTestProjectFromEnv() - t.Setenv("GOOGLE_CREDENTIALS", creds) - t.Setenv("GOOGLE_PROJECT", project) - - pid := "tf-test-" + acctest.RandString(t, 10) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - // No TestDestroy since that's not really the point of this test - Steps: []resource.TestStep{ - { - // This is a control for the other test steps; the provider block doesn't contain `credentials = ""` - Config: testAccProviderCredentials_actWithCredsFromEnv(pid), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - PlanOnly: true, - ExpectNonEmptyPlan: true, - }, - { - // Assert that errors are expected with credentials when - // - GOOGLE_CREDENTIALS is set - // - provider block has credentials = "" - // - TPG v4.60.2 is used - // Context: this was an addidental breaking change introduced with muxing - Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), - 
ExternalProviders: map[string]resource.ExternalProvider{ - "google": { - VersionConstraint: "4.60.2", - Source: "hashicorp/google", - }, - }, - PlanOnly: true, - ExpectNonEmptyPlan: true, - ExpectError: regexp.MustCompile(`unexpected end of JSON input`), - }, - { - // Assert that errors are NOT expected with credentials when - // - GOOGLE_CREDENTIALS is set - // - provider block has credentials = "" - // - TPG v4.84.0 is used - // Context: this was the fix for the unintended breaking change in 4.60.2 - Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), - ExternalProviders: map[string]resource.ExternalProvider{ - "google": { - VersionConstraint: "4.84.0", - Source: "hashicorp/google", - }, - }, - PlanOnly: true, - ExpectNonEmptyPlan: true, - }, - { - // Validation errors are expected in 5.0.0+ - // Context: we intentionally introduced the breaking change again in 5.0.0+ - Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - PlanOnly: true, - ExpectNonEmptyPlan: true, - ExpectError: regexp.MustCompile(`expected a non-empty string`), - }, - }, - }) -} - func TestAccProviderEmptyStrings(t *testing.T) { t.Parallel() diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go index b2c37aef359f..f38a1f8971f2 100644 --- a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go @@ -58,7 +58,7 @@ func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{ if err != nil { return err } - if err := d.Set("network_inferface", networkInterfaces); err != nil { + if err := d.Set("network_interface", networkInterfaces); err != nil { return err } diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl index da3487257612..e3e52c48dee5 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -875,6 +875,9 @@ func TestAccComputeInstanceTemplate_invalidDiskType(t *testing.T) { } func TestAccComputeInstanceTemplate_withNamePrefix(t *testing.T) { + // Randomness from generated name suffix + acctest.SkipIfVcr(t) + t.Parallel() // 8 + 46 = 54 which is the valid max diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl index 5a2eed79d6b4..9d3d9e1bfbce 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl @@ -706,7 +706,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = google_compute_instance_group_manager.foobar.instance_group balancing_mode = "CONNECTION" -{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} +{{- if not (or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga")) }} failover = true } @@ -776,7 +776,7 @@ resource "google_compute_region_backend_service" "lipsum" { backend { group = google_compute_instance_group_manager.foobar.instance_group balancing_mode = "CONNECTION" -{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} +{{- if not (or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga")) }} failover = true } diff --git a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl 
b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl index 179d06855493..7492bd73572a 100644 --- a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl @@ -231,7 +231,7 @@ func schemaNodeConfig() *schema.Schema { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Description: `The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.`, - {{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} + {{- if not (or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga")) }} DiffSuppressFunc: containerNodePoolLabelsSuppress, {{- end }} }, @@ -1692,7 +1692,7 @@ func flattenWorkloadMetadataConfig(c *container.WorkloadMetadataConfig) []map[st } return result } -{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} +{{- if not (or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga")) }} func flattenSandboxConfig(c *container.SandboxConfig) []map[string]interface{} { result := []map[string]interface{}{} if c != nil { diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl index b46574d58170..baaae76280ee 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl @@ -1999,7 +1999,7 @@ func TestAccContainerCluster_withWorkloadMetadataConfig(t *testing.T) { }) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func TestAccContainerCluster_withSandboxConfig(t *testing.T) { t.Parallel() @@ -7098,7 +7098,7 @@ resource "google_container_cluster" 
"with_workload_metadata_config" { `, clusterName, networkName, subnetworkName) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func testAccContainerCluster_withSandboxConfig(clusterName, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { @@ -8816,7 +8816,7 @@ resource "google_container_cluster" "with_workload_identity_config" { } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func testAccContainerCluster_sharedVpc(org, billingId, projectName, name string, suffix string) string { return fmt.Sprintf(` resource "google_project" "host_project" { diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl index 748e0cb13375..acf1ba6b7b82 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl @@ -482,7 +482,7 @@ func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) { }) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func TestAccContainerNodePool_withSandboxConfig(t *testing.T) { t.Parallel() @@ -835,7 +835,7 @@ resource "google_container_node_pool" "with_enable_private_nodes" { `, network, cluster, np, flag) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func TestAccContainerNodePool_withBootDiskKmsKey(t *testing.T) { // Uses generated time-based rotation time acctest.SkipIfVcr(t) @@ -3078,7 +3078,7 @@ 
resource "google_container_node_pool" "with_workload_metadata_config" { `, projectID, cluster, networkName, subnetworkName, np) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func testAccContainerNodePool_withSandboxConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { @@ -3505,7 +3505,7 @@ resource "google_container_node_pool" "with_multi_nic" { `, network, network, network, network, network, network, cluster, np) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func testAccContainerNodePool_withBootDiskKmsKey(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl index 73f97903ba79..37b7f7656c91 100644 --- a/mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl @@ -199,7 +199,7 @@ func TestAccDNSManagedZone_cloudLoggingConfigUpdate(t *testing.T) { }) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func TestAccDNSManagedZone_reverseLookup(t *testing.T) { t.Parallel() @@ -531,7 +531,7 @@ resource "google_dns_managed_zone" "foobar" { `, suffix, suffix, enableCloudLogging) } -{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +{{ if not (or (eq $.TargetVersionName ``) (eq $.TargetVersionName `ga`)) }} func testAccDnsManagedZone_reverseLookup(suffix string) string { return fmt.Sprintf(` resource 
"google_dns_managed_zone" "reverse" { diff --git a/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl index 3bf1357f820e..961ca73d5a56 100644 --- a/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl +++ b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl @@ -239,4 +239,96 @@ resource "google_iam_workload_identity_pool_provider" "my_provider" { `, context) } +func TestAccIAMBetaWorkloadIdentityPoolProvider_x509(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_x509_full(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"workload_identity_pool_id", "workload_identity_pool_provider_id"}, + }, + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_x509_update(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"workload_identity_pool_id", "workload_identity_pool_provider_id"}, + }, + }, + }) +} + +func testAccIAMBetaWorkloadIdentityPoolProvider_x509_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" +} + +resource 
"google_iam_workload_identity_pool_provider" "example" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" + display_name = "Name of provider" + description = "X.509 identity pool provider for automated test" + disabled = true + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" + } + x509 { + trust_store { + trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/intermediate_ca.pem") + } + } + } +} +`, context) +} + +func testAccIAMBetaWorkloadIdentityPoolProvider_x509_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "tf-test-example-pool%{random_suffix}" +} + +resource "google_iam_workload_identity_pool_provider" "example" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "tf-test-example-prvdr%{random_suffix}" + display_name = "Name of provider" + description = "X.509 identity pool provider for automated test" + disabled = true + attribute_mapping = { + "google.subject" = "assertion.subject.dn.cn" + } + x509 { + trust_store { + trust_anchors { + pem_certificate = file("test-fixtures/trust_anchor_updated.pem") + } + trust_anchors { + pem_certificate = file("test-fixtures/intermediate_ca.pem") + } + } + } +} +`, context) +} + {{ end }} From 94d7abdcfa1c272f1e62d3859edca54e181959aa Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 5 Sep 2024 18:38:14 +0100 Subject: [PATCH 41/60] Move validator tests into their own explicit file (#11590) --- .../framework_provider_internal_test.go | 79 +----------------- .../fwprovider/framework_validators_test.go | 82 +++++++++++++++++++ 2 files changed, 85 
insertions(+), 76 deletions(-) create mode 100644 mmv1/third_party/terraform/fwprovider/framework_validators_test.go diff --git a/mmv1/third_party/terraform/fwprovider/framework_provider_internal_test.go b/mmv1/third_party/terraform/fwprovider/framework_provider_internal_test.go index 8fe2592b21a2..8c4eff047541 100644 --- a/mmv1/third_party/terraform/fwprovider/framework_provider_internal_test.go +++ b/mmv1/third_party/terraform/fwprovider/framework_provider_internal_test.go @@ -1,85 +1,12 @@ -package fwprovider +package fwprovider_test import ( - "context" - "io/ioutil" "testing" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/provider" - "github.com/hashicorp/terraform-plugin-framework/schema/validator" - "github.com/hashicorp/terraform-plugin-framework/types" - - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/fwprovider" ) func TestFrameworkProvider_impl(t *testing.T) { - var _ provider.ProviderWithMetaSchema = New() -} - -func TestFrameworkProvider_CredentialsValidator(t *testing.T) { - cases := map[string]struct { - ConfigValue func(t *testing.T) types.String - ExpectedWarningCount int - ExpectedErrorCount int - }{ - "configuring credentials as a path to a credentials JSON file is valid": { - ConfigValue: func(t *testing.T) types.String { - return types.StringValue(transport_tpg.TestFakeCredentialsPath) // Path to a test fixture - }, - }, - "configuring credentials as a path to a non-existant file is NOT valid": { - ConfigValue: func(t *testing.T) types.String { - return types.StringValue("./this/path/doesnt/exist.json") // Doesn't exist - }, - ExpectedErrorCount: 1, - }, - "configuring credentials as a credentials JSON string is valid": { - ConfigValue: func(t *testing.T) types.String { - contents, err := ioutil.ReadFile(transport_tpg.TestFakeCredentialsPath) - if err != nil { - t.Fatalf("Unexpected error: %s", 
err) - } - stringContents := string(contents) - return types.StringValue(stringContents) - }, - }, - "configuring credentials as an empty string is not valid": { - ConfigValue: func(t *testing.T) types.String { - return types.StringValue("") - }, - ExpectedErrorCount: 1, - }, - "leaving credentials unconfigured is valid": { - ConfigValue: func(t *testing.T) types.String { - return types.StringNull() - }, - }, - } - - for tn, tc := range cases { - t.Run(tn, func(t *testing.T) { - // Arrange - req := validator.StringRequest{ - ConfigValue: tc.ConfigValue(t), - } - - resp := validator.StringResponse{ - Diagnostics: diag.Diagnostics{}, - } - - cv := CredentialsValidator() - - // Act - cv.ValidateString(context.Background(), req, &resp) - - // Assert - if resp.Diagnostics.WarningsCount() > tc.ExpectedWarningCount { - t.Errorf("Expected %d warnings, got %d", tc.ExpectedWarningCount, resp.Diagnostics.WarningsCount()) - } - if resp.Diagnostics.ErrorsCount() > tc.ExpectedErrorCount { - t.Errorf("Expected %d errors, got %d", tc.ExpectedErrorCount, resp.Diagnostics.ErrorsCount()) - } - }) - } + var _ provider.ProviderWithMetaSchema = fwprovider.New() } diff --git a/mmv1/third_party/terraform/fwprovider/framework_validators_test.go b/mmv1/third_party/terraform/fwprovider/framework_validators_test.go new file mode 100644 index 000000000000..535fd1774f98 --- /dev/null +++ b/mmv1/third_party/terraform/fwprovider/framework_validators_test.go @@ -0,0 +1,82 @@ +package fwprovider_test + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-provider-google/google/fwprovider" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestFrameworkProvider_CredentialsValidator(t *testing.T) { + cases := map[string]struct { + ConfigValue 
func(t *testing.T) types.String + ExpectedWarningCount int + ExpectedErrorCount int + }{ + "configuring credentials as a path to a credentials JSON file is valid": { + ConfigValue: func(t *testing.T) types.String { + return types.StringValue(transport_tpg.TestFakeCredentialsPath) // Path to a test fixture + }, + }, + "configuring credentials as a path to a non-existant file is NOT valid": { + ConfigValue: func(t *testing.T) types.String { + return types.StringValue("./this/path/doesnt/exist.json") // Doesn't exist + }, + ExpectedErrorCount: 1, + }, + "configuring credentials as a credentials JSON string is valid": { + ConfigValue: func(t *testing.T) types.String { + contents, err := ioutil.ReadFile(transport_tpg.TestFakeCredentialsPath) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + stringContents := string(contents) + return types.StringValue(stringContents) + }, + }, + "configuring credentials as an empty string is not valid": { + ConfigValue: func(t *testing.T) types.String { + return types.StringValue("") + }, + ExpectedErrorCount: 1, + }, + "leaving credentials unconfigured is valid": { + ConfigValue: func(t *testing.T) types.String { + return types.StringNull() + }, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + // Arrange + req := validator.StringRequest{ + ConfigValue: tc.ConfigValue(t), + } + + resp := validator.StringResponse{ + Diagnostics: diag.Diagnostics{}, + } + + cv := fwprovider.CredentialsValidator() + + // Act + cv.ValidateString(context.Background(), req, &resp) + + // Assert + if resp.Diagnostics.WarningsCount() > tc.ExpectedWarningCount { + t.Errorf("Expected %d warnings, got %d", tc.ExpectedWarningCount, resp.Diagnostics.WarningsCount()) + } + if resp.Diagnostics.ErrorsCount() > tc.ExpectedErrorCount { + t.Errorf("Expected %d errors, got %d", tc.ExpectedErrorCount, resp.Diagnostics.ErrorsCount()) + } + }) + } +} From 4f3e13ced4778621c4ea851e4d0af91ad8abbceb Mon Sep 17 00:00:00 2001 From: Mehul3217 
<44620455+Mehul3217@users.noreply.github.com> Date: Thu, 5 Sep 2024 23:26:38 +0530 Subject: [PATCH 42/60] add Large Volumes support for netapp volumes (#11601) --- mmv1/products/netapp/Volume.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mmv1/products/netapp/Volume.yaml b/mmv1/products/netapp/Volume.yaml index b1874546e61b..4c539afe40be 100644 --- a/mmv1/products/netapp/Volume.yaml +++ b/mmv1/products/netapp/Volume.yaml @@ -494,3 +494,12 @@ properties: Specifies the replica zone for regional volume. output: true min_version: beta + - !ruby/object:Api::Type::Boolean + name: 'largeCapacity' + description: | + Optional. Flag indicating if the volume will be a large capacity volume or a regular volume. + - !ruby/object:Api::Type::Boolean + name: 'multipleEndpoints' + description: | + Optional. Flag indicating if the volume will have an IP address per node for volumes supporting multiple IP endpoints. + Only the volume with largeCapacity will be allowed to have multiple endpoints. 
From 60228fb906eaa1bb6a160acde4ea683c62da154a Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Thu, 5 Sep 2024 12:13:12 -0700 Subject: [PATCH 43/60] Make reviewer assignment always use main branch (#11445) --- .ci/magician/cmd/request_reviewer_test.go | 3 + .ci/magician/github/membership.go | 91 ------------------ .ci/magician/github/membership_data.go | 94 +++++++++++++++++++ .../github/reviewer_assignment_test.go | 3 + .github/workflows/request-reviewer.yml | 2 + 5 files changed, 102 insertions(+), 91 deletions(-) create mode 100644 .ci/magician/github/membership_data.go diff --git a/.ci/magician/cmd/request_reviewer_test.go b/.ci/magician/cmd/request_reviewer_test.go index f40288ddb2ab..b2af19be6b2d 100644 --- a/.ci/magician/cmd/request_reviewer_test.go +++ b/.ci/magician/cmd/request_reviewer_test.go @@ -25,6 +25,9 @@ import ( func TestExecRequestReviewer(t *testing.T) { availableReviewers := github.AvailableReviewers() + if len(availableReviewers) < 3 { + t.Fatalf("not enough available reviewers (%v) to run TestExecRequestReviewer (need at least 3)", availableReviewers) + } cases := map[string]struct { pullRequest github.PullRequest requestedReviewers []string diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index c627cc14624a..d3694ccd7255 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -24,97 +24,6 @@ import ( "golang.org/x/exp/maps" ) -var ( - // This is for the random-assignee rotation. 
- reviewerRotation = map[string]struct{}{ - "slevenick": struct{}{}, - "c2thorn": struct{}{}, - "rileykarson": struct{}{}, - "melinath": struct{}{}, - "ScottSuarez": struct{}{}, - "shuyama1": struct{}{}, - "SarahFrench": struct{}{}, - "roaks3": struct{}{}, - "zli82016": struct{}{}, - "trodge": struct{}{}, - "hao-nan-li": struct{}{}, - "NickElliot": struct{}{}, - "BBBmau": struct{}{}, - } - - // This is for new team members who are onboarding - trustedContributors = map[string]struct{}{} - - // This is for reviewers who are "on vacation": will not receive new review assignments but will still receive re-requests for assigned PRs. - // User can specify the time zone like this, and following the example below: - pdtLoc, _ = time.LoadLocation("America/Los_Angeles") - bstLoc, _ = time.LoadLocation("Europe/London") - onVacationReviewers = []onVacationReviewer{ - // Example: taking vacation from 2024-03-28 to 2024-04-02 in pdt time zone. - // both ends are inclusive: - // { - // id: "xyz", - // startDate: newDate(2024, 3, 28, pdtLoc), - // endDate: newDate(2024, 4, 2, pdtLoc), - // }, - { - id: "hao-nan-li", - startDate: newDate(2024, 4, 11, pdtLoc), - endDate: newDate(2024, 6, 14, pdtLoc), - }, - { - id: "ScottSuarez", - startDate: newDate(2024, 4, 30, pdtLoc), - endDate: newDate(2024, 7, 31, pdtLoc), - }, - { - id: "SarahFrench", - startDate: newDate(2024, 8, 2, bstLoc), - endDate: newDate(2024, 8, 6, bstLoc), - }, - { - id: "shuyama1", - startDate: newDate(2024, 5, 22, pdtLoc), - endDate: newDate(2024, 5, 28, pdtLoc), - }, - { - id: "melinath", - startDate: newDate(2024, 6, 26, pdtLoc), - endDate: newDate(2024, 7, 22, pdtLoc), - }, - { - id: "slevenick", - startDate: newDate(2024, 7, 5, pdtLoc), - endDate: newDate(2024, 7, 16, pdtLoc), - }, - { - id: "c2thorn", - startDate: newDate(2024, 7, 10, pdtLoc), - endDate: newDate(2024, 7, 16, pdtLoc), - }, - { - id: "rileykarson", - startDate: newDate(2024, 7, 18, pdtLoc), - endDate: newDate(2024, 8, 10, pdtLoc), - }, - { - 
id: "roaks3", - startDate: newDate(2024, 8, 2, pdtLoc), - endDate: newDate(2024, 8, 9, pdtLoc), - }, - { - id: "slevenick", - startDate: newDate(2024, 8, 10, pdtLoc), - endDate: newDate(2024, 8, 17, pdtLoc), - }, - { - id: "trodge", - startDate: newDate(2024, 8, 24, pdtLoc), - endDate: newDate(2024, 9, 2, pdtLoc), - }, - } -) - type UserType int64 type date struct { diff --git a/.ci/magician/github/membership_data.go b/.ci/magician/github/membership_data.go new file mode 100644 index 000000000000..7b8f2964faee --- /dev/null +++ b/.ci/magician/github/membership_data.go @@ -0,0 +1,94 @@ +package github + +import "time" + +var ( + // This is for the random-assignee rotation. + reviewerRotation = map[string]struct{}{ + "slevenick": {}, + "c2thorn": {}, + "rileykarson": {}, + "melinath": {}, + "ScottSuarez": {}, + "shuyama1": {}, + "SarahFrench": {}, + "roaks3": {}, + "zli82016": {}, + "trodge": {}, + "hao-nan-li": {}, + "NickElliot": {}, + "BBBmau": {}, + } + + // This is for new team members who are onboarding + trustedContributors = map[string]struct{}{} + + // This is for reviewers who are "on vacation": will not receive new review assignments but will still receive re-requests for assigned PRs. + // User can specify the time zone like this, and following the example below: + pdtLoc, _ = time.LoadLocation("America/Los_Angeles") + bstLoc, _ = time.LoadLocation("Europe/London") + onVacationReviewers = []onVacationReviewer{ + // Example: taking vacation from 2024-03-28 to 2024-04-02 in pdt time zone. 
+ // both ends are inclusive: + // { + // id: "xyz", + // startDate: newDate(2024, 3, 28, pdtLoc), + // endDate: newDate(2024, 4, 2, pdtLoc), + // }, + { + id: "hao-nan-li", + startDate: newDate(2024, 4, 11, pdtLoc), + endDate: newDate(2024, 6, 14, pdtLoc), + }, + { + id: "ScottSuarez", + startDate: newDate(2024, 4, 30, pdtLoc), + endDate: newDate(2024, 7, 31, pdtLoc), + }, + { + id: "SarahFrench", + startDate: newDate(2024, 8, 2, bstLoc), + endDate: newDate(2024, 8, 6, bstLoc), + }, + { + id: "shuyama1", + startDate: newDate(2024, 5, 22, pdtLoc), + endDate: newDate(2024, 5, 28, pdtLoc), + }, + { + id: "melinath", + startDate: newDate(2024, 6, 26, pdtLoc), + endDate: newDate(2024, 7, 22, pdtLoc), + }, + { + id: "slevenick", + startDate: newDate(2024, 7, 5, pdtLoc), + endDate: newDate(2024, 7, 16, pdtLoc), + }, + { + id: "c2thorn", + startDate: newDate(2024, 7, 10, pdtLoc), + endDate: newDate(2024, 7, 16, pdtLoc), + }, + { + id: "rileykarson", + startDate: newDate(2024, 7, 18, pdtLoc), + endDate: newDate(2024, 8, 10, pdtLoc), + }, + { + id: "roaks3", + startDate: newDate(2024, 8, 2, pdtLoc), + endDate: newDate(2024, 8, 9, pdtLoc), + }, + { + id: "slevenick", + startDate: newDate(2024, 8, 10, pdtLoc), + endDate: newDate(2024, 8, 17, pdtLoc), + }, + { + id: "trodge", + startDate: newDate(2024, 8, 24, pdtLoc), + endDate: newDate(2024, 9, 2, pdtLoc), + }, + } +) diff --git a/.ci/magician/github/reviewer_assignment_test.go b/.ci/magician/github/reviewer_assignment_test.go index ad85f5232c32..6a2cb33b6a25 100644 --- a/.ci/magician/github/reviewer_assignment_test.go +++ b/.ci/magician/github/reviewer_assignment_test.go @@ -24,6 +24,9 @@ import ( ) func TestChooseCoreReviewers(t *testing.T) { + if len(AvailableReviewers()) < 2 { + t.Fatalf("not enough available reviewers (%v) to test (need at least 2)", AvailableReviewers()) + } firstCoreReviewer := AvailableReviewers()[0] secondCoreReviewer := AvailableReviewers()[1] cases := map[string]struct { diff --git 
a/.github/workflows/request-reviewer.yml b/.github/workflows/request-reviewer.yml index 45c5ffacd4e6..7748d32f8e04 100644 --- a/.github/workflows/request-reviewer.yml +++ b/.github/workflows/request-reviewer.yml @@ -24,6 +24,8 @@ jobs: steps: - name: Checkout Repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 + with: + ref: main - name: Set up Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: From 2c115a2c64c6cc8519df9e6ee66b4ac5474b32e9 Mon Sep 17 00:00:00 2001 From: mihhalj Date: Thu, 5 Sep 2024 21:26:46 +0200 Subject: [PATCH 44/60] Add new compute-network-firewall-policy-with-rules resource (#11524) Co-authored-by: Nick Elliot --- .../NetworkFirewallPolicyWithRules.yaml | 569 ++++++++++++++++++ ..._network_firewall_policy_with_rules.go.erb | 54 ++ ..._network_firewall_policy_with_rules.go.erb | 16 + ..._network_firewall_policy_with_rules.go.erb | 3 + ...ork_firewall_policy_with_rules_full.tf.erb | 116 ++++ ..._network_firewall_policy_with_rules.go.erb | 31 + ..._network_firewall_policy_with_rules.go.erb | 15 + ...ork_firewall_policy_with_rules_test.go.erb | 264 ++++++++ 8 files changed, 1068 insertions(+) create mode 100644 mmv1/products/compute/NetworkFirewallPolicyWithRules.yaml create mode 100644 mmv1/templates/terraform/constants/resource_compute_network_firewall_policy_with_rules.go.erb create mode 100644 mmv1/templates/terraform/decoders/resource_compute_network_firewall_policy_with_rules.go.erb create mode 100644 mmv1/templates/terraform/encoders/resource_compute_network_firewall_policy_with_rules.go.erb create mode 100644 mmv1/templates/terraform/examples/compute_network_firewall_policy_with_rules_full.tf.erb create mode 100644 mmv1/templates/terraform/post_create/resource_compute_network_firewall_policy_with_rules.go.erb create mode 100644 mmv1/templates/terraform/update_encoder/resource_compute_network_firewall_policy_with_rules.go.erb create mode 100644 
mmv1/third_party/terraform/services/compute/resource_compute_network_firewall_policy_with_rules_test.go.erb diff --git a/mmv1/products/compute/NetworkFirewallPolicyWithRules.yaml b/mmv1/products/compute/NetworkFirewallPolicyWithRules.yaml new file mode 100644 index 000000000000..60df0a3589d7 --- /dev/null +++ b/mmv1/products/compute/NetworkFirewallPolicyWithRules.yaml @@ -0,0 +1,569 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: NetworkFirewallPolicyWithRules +min_version: beta +base_url: projects/{{project}}/global/firewallPolicies +create_url: projects/{{project}}/global/firewallPolicies +self_link: projects/{{project}}/global/firewallPolicies/{{name}} +update_verb: :PATCH +description: "The Compute NetworkFirewallPolicy with rules resource" +legacy_long_form_project: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + kind: 'compute#operation' + path: 'name' + base_url: '{{op_id}}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'targetLink' + status: !ruby/object:Api::OpAsync::Status + path: 'status' + complete: 'DONE' + allowed: + - 'PENDING' + - 'RUNNING' + - 'DONE' + error: !ruby/object:Api::OpAsync::Error + path: 'error/errors' + message: 'message' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'compute_network_firewall_policy_with_rules_full' + primary_resource_id: 'network-firewall-policy-with-rules' + vars: + 
policy_name: 'tf-fw-policy-with-rules' + address_group_name: 'tf-address-group' + tag_key_name: 'tf-tag-key' + tag_value_name: 'tf-tag-value' + security_profile_group_name: 'tf-security-profile-group' + security_profile_name: 'tf-security-profile' + test_env_vars: + org_id: :ORG_ID +custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/resource_compute_network_firewall_policy_with_rules.go.erb + encoder: templates/terraform/encoders/resource_compute_network_firewall_policy_with_rules.go.erb + decoder: templates/terraform/decoders/resource_compute_network_firewall_policy_with_rules.go.erb + update_encoder: templates/terraform/update_encoder/resource_compute_network_firewall_policy_with_rules.go.erb + post_create: templates/terraform/post_create/resource_compute_network_firewall_policy_with_rules.go.erb +properties: + - !ruby/object:Api::Type::String + name: creationTimestamp + description: Creation timestamp in RFC3339 text format. + output: true + - !ruby/object:Api::Type::String + name: name + description: | + User-provided name of the Network firewall policy. + The name should be unique in the project in which the firewall policy is created. + The name must be 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? + which means the first character must be a lowercase letter, and all following characters must be a dash, + lowercase letter, or digit, except the last character, which cannot be a dash. + required: true + immutable: true + - !ruby/object:Api::Type::String + name: networkFirewallPolicyId + description: The unique identifier for the resource. This identifier is defined by the server. + output: true + api_name: id + - !ruby/object:Api::Type::String + name: description + description: An optional description of this resource. 
+ - !ruby/object:Api::Type::Array + name: 'rule' + api_name: 'rules' + description: A list of firewall policy rules. + required: true + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'description' + description: | + A description of the rule. + - !ruby/object:Api::Type::String + name: 'ruleName' + description: | + An optional name for the rule. This field is not a unique identifier + and can be updated. + - !ruby/object:Api::Type::Integer + name: 'priority' + description: | + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + required: true + - !ruby/object:Api::Type::NestedObject + name: 'match' + description: + A match condition that incoming traffic is evaluated against. If it + evaluates to true, the corresponding 'action' is enforced. + required: true + properties: + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + description: | + Source IP address range in CIDR format. Required for + INGRESS rules. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'destIpRanges' + description: | + Destination IP address range in CIDR format. Required for + EGRESS rules. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'srcAddressGroups' + description: | + Address groups which should be matched against the traffic source. + Maximum number of source address groups is 10. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'destAddressGroups' + description: | + Address groups which should be matched against the traffic destination. + Maximum number of destination address groups is 10. 
+ - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'srcFqdns' + description: | + Fully Qualified Domain Name (FQDN) which should be matched against + traffic source. Maximum number of source fqdn allowed is 100. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'destFqdns' + description: | + Fully Qualified Domain Name (FQDN) which should be matched against + traffic destination. Maximum number of destination fqdn allowed is 100. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'srcRegionCodes' + description: | + Region codes whose IP addresses will be used to match for source + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of source region codes allowed is 5000. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'destRegionCodes' + description: | + Region codes whose IP addresses will be used to match for destination + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of destination region codes allowed is 5000. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'srcThreatIntelligences' + description: | + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic source. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'destThreatIntelligences' + description: | + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic destination. + - !ruby/object:Api::Type::Array + name: 'layer4Config' + api_name: 'layer4Configs' + description: | + Pairs of IP protocols and ports that the rule should match. 
+ required: true + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipProtocol' + description: | + The IP protocol to which this rule applies. The protocol + type is required when creating a firewall rule. + This value can either be one of the following well + known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), + or the IP protocol number. + required: true + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'ports' + description: | + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. If not specified, this rule + applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + - !ruby/object:Api::Type::Array + name: 'srcSecureTag' + api_name: 'srcSecureTags' + description: | + List of secure tag values, which should be matched at the source + of the traffic. + For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, + and there is no srcIpRange, this rule will be ignored. + Maximum number of source tag values allowed is 256. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + - !ruby/object:Api::Type::Enum + name: 'state' + output: true + description: | + [Output Only] State of the secure tag, either `EFFECTIVE` or + `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted + or its network is deleted. + values: + - :EFFECTIVE + - :INEFFECTIVE + - !ruby/object:Api::Type::Array + name: 'targetSecureTag' + api_name: 'targetSecureTags' + description: | + A list of secure tags that controls which instances the firewall rule + applies to. 
If targetSecureTag are specified, then the + firewall rule applies only to instances in the VPC network that have one + of those EFFECTIVE secure tags, if all the target_secure_tag are in + INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as + targetServiceAccounts. + If neither targetServiceAccounts nor + targetSecureTag are specified, the firewall rule applies + to all instances on the specified network. + Maximum number of target label tags allowed is 256. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + - !ruby/object:Api::Type::Enum + name: 'state' + output: true + description: | + [Output Only] State of the secure tag, either `EFFECTIVE` or + `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted + or its network is deleted. + values: + - :EFFECTIVE + - :INEFFECTIVE + - !ruby/object:Api::Type::String + name: 'action' + description: | + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + required: true + - !ruby/object:Api::Type::Enum + name: 'direction' + description: | + The direction in which this rule applies. If unspecified an INGRESS rule is created. + values: + - :INGRESS + - :EGRESS + - !ruby/object:Api::Type::Boolean + name: 'enableLogging' + description: | + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + send_empty_value: true + - !ruby/object:Api::Type::Array + name: 'targetServiceAccounts' + description: | + A list of service accounts indicating the sets of + instances that are applied with this rule. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'securityProfileGroup' + description: | + A fully-qualified URL of a SecurityProfile resource instance. + Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + - !ruby/object:Api::Type::Boolean + name: 'tlsInspect' + description: | + Boolean flag indicating if the traffic should be TLS decrypted. + It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + - !ruby/object:Api::Type::Boolean + name: 'disabled' + description: | + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. If this is unspecified, the firewall policy rule will be + enabled. + - !ruby/object:Api::Type::Array + name: 'predefinedRules' + description: A list of firewall policy pre-defined rules. + output: true + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'description' + output: true + description: | + A description of the rule. + - !ruby/object:Api::Type::String + name: 'ruleName' + output: true + description: | + An optional name for the rule. This field is not a unique identifier + and can be updated. + - !ruby/object:Api::Type::Integer + name: 'priority' + output: true + description: | + An integer indicating the priority of a rule in the list. The priority must be a value + between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the + highest priority and 2147483647 is the lowest priority. + - !ruby/object:Api::Type::NestedObject + name: 'match' + output: true + description: + A match condition that incoming traffic is evaluated against. If it + evaluates to true, the corresponding 'action' is enforced. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'srcIpRanges' + output: true + description: | + Source IP address range in CIDR format. Required for + INGRESS rules. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'destIpRanges' + output: true + description: | + Destination IP address range in CIDR format. Required for + EGRESS rules. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + output: true + name: 'srcAddressGroups' + description: | + Address groups which should be matched against the traffic source. + Maximum number of source address groups is 10. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + output: true + name: 'destAddressGroups' + description: | + Address groups which should be matched against the traffic destination. + Maximum number of destination address groups is 10. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'srcFqdns' + output: true + description: | + Fully Qualified Domain Name (FQDN) which should be matched against + traffic source. Maximum number of source fqdn allowed is 100. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'destFqdns' + output: true + description: | + Fully Qualified Domain Name (FQDN) which should be matched against + traffic destination. Maximum number of destination fqdn allowed is 100. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'srcRegionCodes' + output: true + description: | + Region codes whose IP addresses will be used to match for source + of traffic. Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of source region codes allowed is 5000. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'destRegionCodes' + output: true + description: | + Region codes whose IP addresses will be used to match for destination + of traffic. 
Should be specified as 2 letter country code defined as per + ISO 3166 alpha-2 country codes. ex."US" + Maximum number of destination region codes allowed is 5000. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'srcThreatIntelligences' + output: true + description: | + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic source. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'destThreatIntelligences' + output: true + description: | + Names of Network Threat Intelligence lists. + The IPs in these lists will be matched against traffic destination. + - !ruby/object:Api::Type::Array + name: 'layer4Config' + output: true + api_name: 'layer4Configs' + description: | + Pairs of IP protocols and ports that the rule should match. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'ipProtocol' + output: true + description: | + The IP protocol to which this rule applies. The protocol + type is required when creating a firewall rule. + This value can either be one of the following well + known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), + or the IP protocol number. + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'ports' + output: true + description: | + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. If not specified, this rule + applies to connections through any port. + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + - !ruby/object:Api::Type::Array + name: 'srcSecureTag' + api_name: 'srcSecureTags' + output: true + description: | + List of secure tag values, which should be matched at the source + of the traffic. + For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, + and there is no srcIpRange, this rule will be ignored. 
+ Maximum number of source tag values allowed is 256. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + description: | + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + - !ruby/object:Api::Type::Enum + name: 'state' + output: true + description: | + [Output Only] State of the secure tag, either `EFFECTIVE` or + `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted + or its network is deleted. + values: + - :EFFECTIVE + - :INEFFECTIVE + - !ruby/object:Api::Type::Array + name: 'targetSecureTag' + api_name: 'targetSecureTags' + output: true + description: | + A list of secure tags that controls which instances the firewall rule + applies to. If targetSecureTag are specified, then the + firewall rule applies only to instances in the VPC network that have one + of those EFFECTIVE secure tags, if all the target_secure_tag are in + INEFFECTIVE state, then this rule will be ignored. + targetSecureTag may not be set at the same time as + targetServiceAccounts. + If neither targetServiceAccounts nor + targetSecureTag are specified, the firewall rule applies + to all instances on the specified network. + Maximum number of target label tags allowed is 256. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + description: | + Name of the secure tag, created with TagManager's TagValue API. + @pattern tagValues/[0-9]+ + - !ruby/object:Api::Type::Enum + name: 'state' + output: true + description: | + [Output Only] State of the secure tag, either `EFFECTIVE` or + `INEFFECTIVE`. A secure tag is `INEFFECTIVE` when it is deleted + or its network is deleted. + values: + - :EFFECTIVE + - :INEFFECTIVE + - !ruby/object:Api::Type::String + name: 'action' + output: true + description: | + The Action to perform when the client connection triggers the rule. 
Can currently be either + "allow", "deny", "apply_security_profile_group" or "goto_next". + - !ruby/object:Api::Type::Enum + name: 'direction' + output: true + description: | + The direction in which this rule applies. If unspecified an INGRESS rule is created. + values: + - :INGRESS + - :EGRESS + - !ruby/object:Api::Type::Boolean + name: 'enableLogging' + output: true + description: | + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + send_empty_value: true + - !ruby/object:Api::Type::Array + name: 'targetServiceAccounts' + output: true + description: | + A list of service accounts indicating the sets of + instances that are applied with this rule. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'securityProfileGroup' + output: true + description: | + A fully-qualified URL of a SecurityProfile resource instance. + Example: + https://networksecurity.googleapis.com/v1/projects/{project}/locations/{location}/securityProfileGroups/my-security-profile-group + Must be specified if action is 'apply_security_profile_group'. + - !ruby/object:Api::Type::Boolean + name: 'tlsInspect' + output: true + description: | + Boolean flag indicating if the traffic should be TLS decrypted. + It can be set only if action = 'apply_security_profile_group' and cannot be set for other actions. + - !ruby/object:Api::Type::Boolean + name: 'disabled' + output: true + description: | + Denotes whether the firewall policy rule is disabled. When set to true, + the firewall policy rule is not enforced and traffic behaves as if it did + not exist. If this is unspecified, the firewall policy rule will be + enabled. + - !ruby/object:Api::Type::Fingerprint + name: fingerprint + description: Fingerprint of the resource. This field is used internally during updates of this resource. 
+ output: true + - !ruby/object:Api::Type::String + name: selfLink + description: Server-defined URL for the resource. + output: true + - !ruby/object:Api::Type::String + name: selfLinkWithId + description: Server-defined URL for this resource with the resource id. + output: true + - !ruby/object:Api::Type::Integer + name: ruleTupleCount + description: Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples. + output: true diff --git a/mmv1/templates/terraform/constants/resource_compute_network_firewall_policy_with_rules.go.erb b/mmv1/templates/terraform/constants/resource_compute_network_firewall_policy_with_rules.go.erb new file mode 100644 index 000000000000..3a1f2c840e51 --- /dev/null +++ b/mmv1/templates/terraform/constants/resource_compute_network_firewall_policy_with_rules.go.erb @@ -0,0 +1,54 @@ +func networkFirewallPolicyWithRulesConvertPriorityToInt(v interface {}) (int64, error) { + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal, nil + } + } + + if intVal, ok := v.(int64); ok { + return intVal, nil + } + + if floatVal, ok := v.(float64); ok { + intVal := int64(floatVal) + return intVal, nil + } + + return 0, fmt.Errorf("Incorrect rule priority: %s. Priority must be a number", v) +} + +func networkFirewallPolicyWithRulesIsPredefinedRule(rule map[string]interface{}) (bool, error) { + // Priorities from 2147483548 to 2147483647 are reserved and cannot be modified by the user. 
+ const ReservedPriorityStart = 2147483548 + + priority := rule["priority"] + priorityInt, err := networkFirewallPolicyWithRulesConvertPriorityToInt(priority) + + if err != nil { + return false, err + } + + return priorityInt >= ReservedPriorityStart, nil + +} + +func networkFirewallPolicyWithRulesSplitPredefinedRules(allRules []interface{}) ([]interface{}, []interface{}, error) { + predefinedRules := make([]interface{}, 0) + rules := make([]interface{}, 0) + + for _, rule := range allRules { + isPredefined, err := networkFirewallPolicyWithRulesIsPredefinedRule(rule.(map[string]interface{})) + if err != nil { + return nil, nil, err + } + + if isPredefined { + predefinedRules = append(predefinedRules, rule) + } else { + rules = append(rules, rule) + } + } + + return rules, predefinedRules, nil +} + diff --git a/mmv1/templates/terraform/decoders/resource_compute_network_firewall_policy_with_rules.go.erb b/mmv1/templates/terraform/decoders/resource_compute_network_firewall_policy_with_rules.go.erb new file mode 100644 index 000000000000..e702145fec61 --- /dev/null +++ b/mmv1/templates/terraform/decoders/resource_compute_network_firewall_policy_with_rules.go.erb @@ -0,0 +1,16 @@ +rules, predefinedRules, err := networkFirewallPolicyWithRulesSplitPredefinedRules(res["rules"].([]interface{})) + +if err != nil { + return nil, fmt.Errorf("Error occurred while splitting pre-defined rules: %s", err) +} + +res["rules"] = rules +res["predefinedRules"] = predefinedRules + +config := meta.(*transport_tpg.Config) + +if err := d.Set("predefined_rules", flattenComputeNetworkFirewallPolicyWithRulesPredefinedRules(predefinedRules, d, config)); err != nil { + return nil, fmt.Errorf("Error occurred while setting pre-defined rules: %s", err) +} + +return res, nil diff --git a/mmv1/templates/terraform/encoders/resource_compute_network_firewall_policy_with_rules.go.erb b/mmv1/templates/terraform/encoders/resource_compute_network_firewall_policy_with_rules.go.erb new file mode 100644 index 
000000000000..939b22280811 --- /dev/null +++ b/mmv1/templates/terraform/encoders/resource_compute_network_firewall_policy_with_rules.go.erb @@ -0,0 +1,3 @@ +delete(obj, "rules") // Rules are not supported in the create API +return obj, nil + diff --git a/mmv1/templates/terraform/examples/compute_network_firewall_policy_with_rules_full.tf.erb b/mmv1/templates/terraform/examples/compute_network_firewall_policy_with_rules_full.tf.erb new file mode 100644 index 000000000000..13957d97b137 --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_network_firewall_policy_with_rules_full.tf.erb @@ -0,0 +1,116 @@ +data "google_project" "project" { + provider = google-beta +} + +resource "google_compute_network_firewall_policy_with_rules" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['policy_name'] %>" + description = "Terraform test" + provider = google-beta + + rule { + description = "tcp rule" + priority = 1000 + enable_logging = true + action = "allow" + direction = "EGRESS" + match { + layer4_config { + ip_protocol = "tcp" + ports = [8080, 7070] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["www.yyy.com", "www.zzz.com"] + dest_region_codes = ["HK", "IN"] + dest_threat_intelligences = ["iplist-search-engines-crawlers", "iplist-tor-exit-nodes"] + dest_address_groups = [google_network_security_address_group.address_group_1.id] + } + target_secure_tag { + name = "tagValues/${google_tags_tag_value.secure_tag_value_1.name}" + } + } + rule { + description = "udp rule" + priority = 2000 + enable_logging = false + action = "deny" + direction = "INGRESS" + match { + layer4_config { + ip_protocol = "udp" + } + src_ip_ranges = ["0.0.0.0/0"] + src_fqdns = ["www.abc.com", "www.def.com"] + src_region_codes = ["US", "CA"] + src_threat_intelligences = ["iplist-known-malicious-ips", "iplist-public-clouds"] + src_address_groups = [google_network_security_address_group.address_group_1.id] + src_secure_tag { + name = 
"tagValues/${google_tags_tag_value.secure_tag_value_1.name}" + } + } + disabled = true + } + + rule { + description = "security profile group rule" + rule_name = "tcp rule" + priority = 3000 + enable_logging = false + action = "apply_security_profile_group" + direction = "INGRESS" + match { + layer4_config { + ip_protocol = "tcp" + } + src_ip_ranges = ["0.0.0.0/0"] + } + target_service_accounts = ["test@google.com"] + security_profile_group = "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group_1.id}" + tls_inspect = true + } +} + +resource "google_network_security_address_group" "address_group_1" { + provider = google-beta + name = "<%= ctx[:vars]['address_group_name'] %>" + parent = "projects/${data.google_project.project.name}" + description = "Global address group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_tags_tag_key" "secure_tag_key_1" { + provider = google-beta + description = "Tag key" + parent = "projects/${data.google_project.project.name}" + purpose = "GCE_FIREWALL" + short_name = "<%= ctx[:vars]['tag_key_name'] %>" + purpose_data = { + network = "${data.google_project.project.name}/default" + } +} + +resource "google_tags_tag_value" "secure_tag_value_1" { + provider = google-beta + description = "Tag value" + parent = "tagKeys/${google_tags_tag_key.secure_tag_key_1.name}" + short_name = "<%= ctx[:vars]['tag_value_name'] %>" +} + +resource "google_network_security_security_profile_group" "security_profile_group_1" { + provider = google-beta + name = "<%= ctx[:vars]['security_profile_group_name'] %>" + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + description = "my description" + threat_prevention_profile = google_network_security_security_profile.security_profile_1.id +} + +resource "google_network_security_security_profile" "security_profile_1" { + provider = google-beta + name = "<%= 
ctx[:vars]['security_profile_name'] %>" + type = "THREAT_PREVENTION" + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + location = "global" +} + diff --git a/mmv1/templates/terraform/post_create/resource_compute_network_firewall_policy_with_rules.go.erb b/mmv1/templates/terraform/post_create/resource_compute_network_firewall_policy_with_rules.go.erb new file mode 100644 index 000000000000..ec4b1a1c5ce1 --- /dev/null +++ b/mmv1/templates/terraform/post_create/resource_compute_network_firewall_policy_with_rules.go.erb @@ -0,0 +1,31 @@ +log.Printf("[DEBUG] Post-create for NetworkFirewallPolicyWithRules %q", d.Id()) + +url, err = tpgresource.ReplaceVarsForId(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewallPolicies/{{name}}") +if err != nil { + return err +} + +headers = make(http.Header) +res, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, +}) +if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkFirewallPolicyWithRules %q", d.Id())) +} + +if err := d.Set("fingerprint", flattenComputeNetworkFirewallPolicyWithRulesFingerprint(res["fingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkFirewallPolicyWithRules: %s", err) +} + +res, err = resourceComputeNetworkFirewallPolicyWithRulesDecoder(d, meta, res) +if err != nil { + return err +} + +log.Printf("[DEBUG] Updating NetworkFirewallPolicyWithRules %q", d.Id()) +return resourceComputeNetworkFirewallPolicyWithRulesUpdate(d, meta) diff --git a/mmv1/templates/terraform/update_encoder/resource_compute_network_firewall_policy_with_rules.go.erb b/mmv1/templates/terraform/update_encoder/resource_compute_network_firewall_policy_with_rules.go.erb new file mode 100644 index 000000000000..a503293a6f87 --- /dev/null +++ 
b/mmv1/templates/terraform/update_encoder/resource_compute_network_firewall_policy_with_rules.go.erb @@ -0,0 +1,15 @@ +config := meta.(*transport_tpg.Config) + +predefinedRulesProp, err := expandComputeNetworkFirewallPolicyWithRulesRule(d.Get("predefined_rules"), d, config) +if err != nil { + return nil, err +} + +rules := obj["rules"].([]interface{}) +obj["rules"] = append(rules, predefinedRulesProp) + +return obj, nil + + + + diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_network_firewall_policy_with_rules_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_network_firewall_policy_with_rules_test.go.erb new file mode 100644 index 000000000000..38e8017cebc5 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/resource_compute_network_firewall_policy_with_rules_test.go.erb @@ -0,0 +1,264 @@ +<% autogen_exception -%> +package compute_test +<% unless version == 'ga' -%> +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + +) + +func TestAccComputeNetworkFirewallPolicyWithRules_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeNetworkFirewallPolicyWithRulesDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkFirewallPolicyWithRules_full(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_with_rules.network-firewall-policy-with-rules", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeNetworkFirewallPolicyWithRules_update(context), + }, + { + 
ResourceName: "google_compute_network_firewall_policy_with_rules.network-firewall-policy-with-rules", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeNetworkFirewallPolicyWithRules_full(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = google-beta +} + +resource "google_compute_network_firewall_policy_with_rules" "network-firewall-policy-with-rules" { + name = "tf-test-tf-fw-policy-with-rules%{random_suffix}" + description = "Terraform test" + provider = google-beta + + rule { + description = "tcp rule" + priority = 1000 + enable_logging = true + action = "allow" + direction = "EGRESS" + match { + layer4_config { + ip_protocol = "tcp" + ports = [8080, 7070] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["www.yyy.com", "www.zzz.com"] + dest_region_codes = ["HK", "IN"] + dest_threat_intelligences = ["iplist-search-engines-crawlers", "iplist-tor-exit-nodes"] + dest_address_groups = [google_network_security_address_group.address_group_1.id] + } + target_secure_tag { + name = "tagValues/${google_tags_tag_value.secure_tag_value_1.name}" + } + } + rule { + description = "udp rule" + priority = 2000 + enable_logging = false + action = "deny" + direction = "INGRESS" + match { + layer4_config { + ip_protocol = "udp" + } + src_ip_ranges = ["0.0.0.0/0"] + src_fqdns = ["www.abc.com", "www.def.com"] + src_region_codes = ["US", "CA"] + src_threat_intelligences = ["iplist-known-malicious-ips", "iplist-public-clouds"] + src_address_groups = [google_network_security_address_group.address_group_1.id] + src_secure_tag { + name = "tagValues/${google_tags_tag_value.secure_tag_value_1.name}" + } + } + disabled = true + } + + rule { + description = "security profile group rule" + rule_name = "tcp rule" + priority = 3000 + enable_logging = false + action = "apply_security_profile_group" + direction = "INGRESS" + match { + layer4_config { + ip_protocol = "tcp" + } + 
src_ip_ranges = ["0.0.0.0/0"] + } + target_service_accounts = ["test@google.com"] + security_profile_group = "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group_1.id}" + } +} + +resource "google_network_security_address_group" "address_group_1" { + provider = google-beta + name = "tf-test-tf-address-group%{random_suffix}" + parent = "projects/${data.google_project.project.name}" + description = "Global address group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_tags_tag_key" "secure_tag_key_1" { + provider = google-beta + description = "Tag key" + parent = "projects/${data.google_project.project.name}" + purpose = "GCE_FIREWALL" + short_name = "tf-test-tf-tag-key%{random_suffix}" + purpose_data = { + network = "${data.google_project.project.name}/default" + } +} + +resource "google_tags_tag_value" "secure_tag_value_1" { + provider = google-beta + description = "Tag value" + parent = "tagKeys/${google_tags_tag_key.secure_tag_key_1.name}" + short_name = "tf-test-tf-tag-value%{random_suffix}" +} + +resource "google_network_security_security_profile_group" "security_profile_group_1" { + provider = google-beta + name = "tf-test-tf-security-profile-group%{random_suffix}" + parent = "organizations/%{org_id}" + description = "my description" + threat_prevention_profile = google_network_security_security_profile.security_profile_1.id +} + +resource "google_network_security_security_profile" "security_profile_1" { + provider = google-beta + name = "tf-test-tf-security-profile%{random_suffix}" + type = "THREAT_PREVENTION" + parent = "organizations/%{org_id}" + location = "global" +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyWithRules_update(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = google-beta +} + +resource "google_compute_network_firewall_policy_with_rules" 
"network-firewall-policy-with-rules" { + name = "tf-test-tf-fw-policy-with-rules%{random_suffix}" + description = "Terraform test - update" + provider = google-beta + + rule { + description = "tcp rule - changed" + priority = 1000 + enable_logging = false + action = "apply_security_profile_group" + direction = "EGRESS" + match { + layer4_config { + ip_protocol = "tcp" + ports = [8080, 7070] + } + dest_ip_ranges = ["11.100.0.1/32"] + } + target_service_accounts = ["test@google.com"] + security_profile_group = "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group_1.id}" + tls_inspect = true + } + rule { + description = "new udp rule" + priority = 4000 + enable_logging = true + action = "deny" + direction = "INGRESS" + match { + layer4_config { + ip_protocol = "udp" + } + src_ip_ranges = ["0.0.0.0/0"] + src_fqdns = ["www.abc.com", "www.ghi.com"] + src_region_codes = ["IT", "FR"] + src_threat_intelligences = ["iplist-public-clouds"] + src_address_groups = [google_network_security_address_group.address_group_1.id] + src_secure_tag { + name = "tagValues/${google_tags_tag_value.secure_tag_value_1.name}" + } + } + disabled = false + } +} + +resource "google_network_security_address_group" "address_group_1" { + provider = google-beta + name = "tf-test-tf-address-group%{random_suffix}" + parent = "projects/${data.google_project.project.name}" + description = "Global address group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_tags_tag_key" "secure_tag_key_1" { + provider = google-beta + description = "Tag key" + parent = "projects/${data.google_project.project.name}" + purpose = "GCE_FIREWALL" + short_name = "tf-test-tf-tag-key%{random_suffix}" + purpose_data = { + network = "${data.google_project.project.name}/default" + } +} + +resource "google_tags_tag_value" "secure_tag_value_1" { + provider = google-beta + description = "Tag value" + parent = 
"tagKeys/${google_tags_tag_key.secure_tag_key_1.name}" + short_name = "tf-test-tf-tag-value%{random_suffix}" +} + +resource "google_network_security_security_profile_group" "security_profile_group_1" { + provider = google-beta + name = "tf-test-tf-security-profile-group%{random_suffix}" + parent = "organizations/%{org_id}" + description = "my description" + threat_prevention_profile = google_network_security_security_profile.security_profile_1.id +} + +resource "google_network_security_security_profile" "security_profile_1" { + provider = google-beta + name = "tf-test-tf-security-profile%{random_suffix}" + type = "THREAT_PREVENTION" + parent = "organizations/%{org_id}" + location = "global" +} +`, context) +} +<% end -%> + + From 40b53825878a3eaaf56d8dbcc49835c69723a31a Mon Sep 17 00:00:00 2001 From: SizzleHsu Date: Thu, 5 Sep 2024 12:36:48 -0700 Subject: [PATCH 45/60] Add sweeper for storage pool resources. (#11571) --- .../resource_compute_storage_pool_sweeper.go | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 mmv1/third_party/terraform/services/compute/resource_compute_storage_pool_sweeper.go diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_storage_pool_sweeper.go b/mmv1/third_party/terraform/services/compute/resource_compute_storage_pool_sweeper.go new file mode 100644 index 000000000000..bee2d1bdb3cc --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/resource_compute_storage_pool_sweeper.go @@ -0,0 +1,96 @@ +package compute + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// This will sweep GCE Storage Pool resources +func init() { + sweeper.AddTestSweepers("ComputeStoragePool", testSweepStoragePool) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepStoragePool(region string) error { + resourceName := 
"ComputeStoragePool" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + zones := []string{"us-central1-a", "us-central1-b", "us-central1-c", "us-central1-f", "us-east1-b", "us-east1-c", "us-east1-d", "us-west1-a", "us-west1-b", "us-west1-c"} + for _, zone := range zones { + servicesUrl := "https://compute.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/storagePools" + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: servicesUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", servicesUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Count items that weren't sweeped. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["id"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource id was nil", resourceName) + return nil + } + + id := obj["name"].(string) + // Increment count and skip if resource is not sweepable. 
+ if !sweeper.IsSweepableTestResource(id) { + nonPrefixCount++ + continue + } + + deleteUrl := servicesUrl + "/" + id + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, id) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items without tf-test prefix remain for zone %s", nonPrefixCount, zone) + } + + } + + return nil +} From b9e00f4e463a233076d2118e3a72b798a9a9b41f Mon Sep 17 00:00:00 2001 From: Naitian Liu <83430653+naitianliu-google@users.noreply.github.com> Date: Thu, 5 Sep 2024 19:06:03 -0700 Subject: [PATCH 46/60] Update docs for new image types support for VmwareNodePool (#11643) --- mmv1/products/gkeonprem/VmwareNodePool.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/gkeonprem/VmwareNodePool.yaml b/mmv1/products/gkeonprem/VmwareNodePool.yaml index f95443b71d53..3e5256cfe5be 100644 --- a/mmv1/products/gkeonprem/VmwareNodePool.yaml +++ b/mmv1/products/gkeonprem/VmwareNodePool.yaml @@ -113,7 +113,7 @@ properties: required: true description: | The OS image to be used for each node in a node pool. - Currently `cos`, `ubuntu`, `ubuntu_containerd` and `windows` are supported. + Currently `cos`, `cos_cgv2`, `ubuntu`, `ubuntu_cgv2`, `ubuntu_containerd` and `windows` are supported. - !ruby/object:Api::Type::String name: "image" description: The OS image name in vCenter, only valid when using Windows. 
From 01df8121b116006b3e35ac107d89cc0d16f25bb7 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Fri, 6 Sep 2024 02:42:04 -0700 Subject: [PATCH 47/60] re-enable Dataproc Metastore Federation tests (#10445) Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com> --- mmv1/products/metastore/Federation.yaml | 2 -- mmv1/products/metastore/Service.yaml | 1 - .../examples/dataproc_metastore_federation_basic.tf.erb | 4 ++-- .../examples/dataproc_metastore_federation_bigquery.tf.erb | 4 ++-- .../dataproc_metastore_service_private_service_connect.tf.erb | 1 + .../examples/go/dataproc_metastore_federation_basic.tf.tmpl | 4 ++-- .../go/dataproc_metastore_federation_bigquery.tf.tmpl | 4 ++-- ...dataproc_metastore_service_private_service_connect.tf.tmpl | 1 + 8 files changed, 10 insertions(+), 11 deletions(-) diff --git a/mmv1/products/metastore/Federation.yaml b/mmv1/products/metastore/Federation.yaml index fbcac6ed22a2..1a9d671b1569 100644 --- a/mmv1/products/metastore/Federation.yaml +++ b/mmv1/products/metastore/Federation.yaml @@ -51,7 +51,6 @@ import_format: examples: - !ruby/object:Provider::Terraform::Examples name: 'dataproc_metastore_federation_basic' - skip_test: true # https://github.com/hashicorp/terraform-provider-google/issues/13710 primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-metastore-fed%s", context["random_suffix"])' @@ -60,7 +59,6 @@ examples: service_id: 'metastore-service' - !ruby/object:Provider::Terraform::Examples name: 'dataproc_metastore_federation_bigquery' - skip_test: true # https://github.com/hashicorp/terraform-provider-google/issues/13710 primary_resource_id: 'default' primary_resource_name: 'fmt.Sprintf("tf-test-metastore-fed%s", context["random_suffix"])' diff --git a/mmv1/products/metastore/Service.yaml b/mmv1/products/metastore/Service.yaml index 1836c904df2b..121c5261e63f 100644 --- a/mmv1/products/metastore/Service.yaml +++ b/mmv1/products/metastore/Service.yaml @@ -83,7 +83,6 @@ examples: 
- !ruby/object:Provider::Terraform::Examples name: 'dataproc_metastore_service_endpoint' skip_docs: true - skip_test: true # https://github.com/hashicorp/terraform-provider-google/issues/13710 primary_resource_id: 'endpoint' vars: metastore_service_name: 'metastore-endpoint' diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_federation_basic.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_federation_basic.tf.erb index 52d6a8329017..6ec9e8fdff35 100644 --- a/mmv1/templates/terraform/examples/dataproc_metastore_federation_basic.tf.erb +++ b/mmv1/templates/terraform/examples/dataproc_metastore_federation_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_dataproc_metastore_federation" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" - federation_id = "<%= ctx[:vars]['metastore_federation_name'] %>" + federation_id = "<%= ctx[:vars]['federation_id'] %>" version = "3.1.2" backend_metastores { @@ -11,7 +11,7 @@ resource "google_dataproc_metastore_federation" "<%= ctx[:primary_resource_id] % } resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { - service_id = "<%= ctx[:vars]['metastore_federation_name'] %>" + service_id = "<%= ctx[:vars]['service_id'] %>" location = "us-central1" tier = "DEVELOPER" diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_federation_bigquery.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_federation_bigquery.tf.erb index 696b886726a4..64f760359549 100644 --- a/mmv1/templates/terraform/examples/dataproc_metastore_federation_bigquery.tf.erb +++ b/mmv1/templates/terraform/examples/dataproc_metastore_federation_bigquery.tf.erb @@ -1,6 +1,6 @@ resource "google_dataproc_metastore_federation" "<%= ctx[:primary_resource_id] %>" { location = "us-central1" - federation_id = "<%= ctx[:vars]['metastore_federation_name'] %>" + federation_id = "<%= ctx[:vars]['federation_id'] %>" version = "3.1.2" backend_metastores { @@ -17,7 +17,7 @@ resource 
"google_dataproc_metastore_federation" "<%= ctx[:primary_resource_id] % } resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { - service_id = "<%= ctx[:vars]['metastore_federation_name'] %>" + service_id = "<%= ctx[:vars]['service_id'] %>" location = "us-central1" tier = "DEVELOPER" diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_service_private_service_connect.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_service_private_service_connect.tf.erb index 564949ed8477..01cfb57ba8bb 100644 --- a/mmv1/templates/terraform/examples/dataproc_metastore_service_private_service_connect.tf.erb +++ b/mmv1/templates/terraform/examples/dataproc_metastore_service_private_service_connect.tf.erb @@ -14,6 +14,7 @@ resource "google_compute_subnetwork" "subnet" { resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { service_id = "<%= ctx[:vars]['metastore_service_name'] %>" location = "us-central1" + tier = "DEVELOPER" hive_metastore_config { version = "3.1.2" diff --git a/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_basic.tf.tmpl index a567f0d3c370..46a80cc4e7e3 100644 --- a/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_basic.tf.tmpl @@ -1,6 +1,6 @@ resource "google_dataproc_metastore_federation" "{{$.PrimaryResourceId}}" { location = "us-central1" - federation_id = "{{index $.Vars "metastore_federation_name"}}" + federation_id = "{{index $.Vars "federation_id"}}" version = "3.1.2" backend_metastores { @@ -11,7 +11,7 @@ resource "google_dataproc_metastore_federation" "{{$.PrimaryResourceId}}" { } resource "google_dataproc_metastore_service" "{{$.PrimaryResourceId}}" { - service_id = "{{index $.Vars "metastore_federation_name"}}" + service_id = "{{index $.Vars "service_id"}}" location = 
"us-central1" tier = "DEVELOPER" diff --git a/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_bigquery.tf.tmpl b/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_bigquery.tf.tmpl index 5e2a4cadd81f..4d22834f235c 100644 --- a/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_bigquery.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dataproc_metastore_federation_bigquery.tf.tmpl @@ -1,6 +1,6 @@ resource "google_dataproc_metastore_federation" "{{$.PrimaryResourceId}}" { location = "us-central1" - federation_id = "{{index $.Vars "metastore_federation_name"}}" + federation_id = "{{index $.Vars "federation_id"}}" version = "3.1.2" backend_metastores { @@ -17,7 +17,7 @@ resource "google_dataproc_metastore_federation" "{{$.PrimaryResourceId}}" { } resource "google_dataproc_metastore_service" "{{$.PrimaryResourceId}}" { - service_id = "{{index $.Vars "metastore_federation_name"}}" + service_id = "{{index $.Vars "service_id"}}" location = "us-central1" tier = "DEVELOPER" diff --git a/mmv1/templates/terraform/examples/go/dataproc_metastore_service_private_service_connect.tf.tmpl b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_private_service_connect.tf.tmpl index 72447a11e06f..5b07bf408e5c 100644 --- a/mmv1/templates/terraform/examples/go/dataproc_metastore_service_private_service_connect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_private_service_connect.tf.tmpl @@ -14,6 +14,7 @@ resource "google_compute_subnetwork" "subnet" { resource "google_dataproc_metastore_service" "{{$.PrimaryResourceId}}" { service_id = "{{index $.Vars "metastore_service_name"}}" location = "us-central1" + tier = "DEVELOPER" hive_metastore_config { version = "3.1.2" From 8a1e701ec0c8b615c2d69936e2511cc3041f0ac6 Mon Sep 17 00:00:00 2001 From: Scott Suarez Date: Fri, 6 Sep 2024 08:03:08 -0700 Subject: [PATCH 48/60] remove community checker code and update trigger targets (#11646) Co-authored-by: 
Stephen Lewis (Burrows) --- .ci/gcb-community-checker.yml | 81 ------------- .ci/gcb-contributor-membership-checker.yml | 6 +- .ci/magician/cloudbuild/build_trigger.go | 61 ---------- .ci/magician/cloudbuild/community.go | 6 +- .ci/magician/cmd/community_checker.go | 97 --------------- .ci/magician/cmd/community_checker_test.go | 126 -------------------- .ci/magician/cmd/interfaces.go | 3 +- .ci/magician/cmd/membership_checker.go | 2 +- .ci/magician/cmd/membership_checker_test.go | 6 +- .ci/magician/cmd/mock_cloudbuild_test.go | 9 +- 10 files changed, 11 insertions(+), 386 deletions(-) delete mode 100644 .ci/gcb-community-checker.yml delete mode 100644 .ci/magician/cloudbuild/build_trigger.go delete mode 100644 .ci/magician/cmd/community_checker.go delete mode 100644 .ci/magician/cmd/community_checker_test.go diff --git a/.ci/gcb-community-checker.yml b/.ci/gcb-community-checker.yml deleted file mode 100644 index ba689d1307ff..000000000000 --- a/.ci/gcb-community-checker.yml +++ /dev/null @@ -1,81 +0,0 @@ ---- -steps: - # The GCB / GH integration uses a shallow clone of the repo. We need to convert - # that to a full clone in order to work with it properly. - # https://cloud.google.com/source-repositories/docs/integrating-with-cloud-build#unshallowing_clones - - name: "gcr.io/cloud-builders/git" - args: - - fetch - - --unshallow - - # Configure git - - name: "gcr.io/cloud-builders/git" - args: - - config - - --global - - user.email - - magic-modules+differ@google.com - - name: "gcr.io/cloud-builders/git" - args: - - config - - --global - - user.name - - "Modular Magician Diff Process" - - # Display commit log for clarity - - name: "gcr.io/cloud-builders/git" - args: - - log - - "--oneline" - - "-n 10" - - # Find common ancestor commit and apply diff for the .ci folder. 
- - name: "gcr.io/cloud-builders/git" - id: findMergeBase - entrypoint: "bash" - args: - - "-c" - - | - git fetch origin main - if [ "$_BASE_BRANCH" != "main" ]; then - echo "Checking out .ci/ folder from main" - git checkout origin/main -- .ci/ - else - base_commit=$(git merge-base origin/main HEAD) - echo "Common ancestor commit: $base_commit" - git diff $base_commit origin/main -- .ci/ - git diff $base_commit origin/main -- .ci/ > /workspace/ci.diff - git apply ./ci.diff --allow-empty - fi - if [ "$_BASE_BRANCH" != "main" ]; then - echo "Checking out tools/ folder from main" - git checkout origin/main -- tools/ - else - base_commit=$(git merge-base origin/main HEAD) - echo "Common ancestor commit: $base_commit" - git diff $base_commit origin/main -- tools/ - git diff $base_commit origin/main -- tools/ > /workspace/tools.diff - git apply ./tools.diff --allow-empty - fi - - - name: 'gcr.io/graphite-docker-images/go-plus' - entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' - id: community-checker - secretEnv: ["GITHUB_TOKEN_MAGIC_MODULES", "GENERATE_DIFFS_TRIGGER"] - timeout: 8000s - args: - - "community-checker" - - $_PR_NUMBER - - $COMMIT_SHA - - $BRANCH_NAME - - $_HEAD_REPO_URL - - $_HEAD_BRANCH - - $_BASE_BRANCH - -logsBucket: 'gs://cloudbuild-community-checker-logs' -availableSecrets: - secretManager: - - versionName: projects/673497134629/secrets/github-magician-token-generate-diffs-magic-modules/versions/latest - env: GITHUB_TOKEN_MAGIC_MODULES - - versionName: projects/673497134629/secrets/ci-trigger-generate-diffs/versions/latest - env: GENERATE_DIFFS_TRIGGER diff --git a/.ci/gcb-contributor-membership-checker.yml b/.ci/gcb-contributor-membership-checker.yml index ce7260352953..dbb9fb1c902b 100644 --- a/.ci/gcb-contributor-membership-checker.yml +++ b/.ci/gcb-contributor-membership-checker.yml @@ -62,7 +62,7 @@ steps: entrypoint: "/workspace/.ci/scripts/go-plus/magician/exec.sh" id: contributor-membership-checker secretEnv: - 
["GITHUB_TOKEN_MAGIC_MODULES", "GENERATE_DIFFS_TRIGGER", "COMMUNITY_CHECKER_TRIGGER", "DOWNSTREAM_GENERATION_AND_TEST_TRIGGER"] + ["GITHUB_TOKEN_MAGIC_MODULES", "DOWNSTREAM_GENERATION_AND_TEST_TRIGGER"] timeout: 8000s args: - "membership-checker" @@ -74,9 +74,5 @@ availableSecrets: secretManager: - versionName: projects/673497134629/secrets/github-magician-token-generate-diffs-magic-modules/versions/latest env: GITHUB_TOKEN_MAGIC_MODULES - - versionName: projects/673497134629/secrets/ci-trigger-generate-diffs/versions/latest - env: GENERATE_DIFFS_TRIGGER - - versionName: projects/673497134629/secrets/ci-trigger-community-checker/versions/latest - env: COMMUNITY_CHECKER_TRIGGER - versionName: projects/673497134629/secrets/ci-trigger-downstream-generation-and-test/versions/latest env: DOWNSTREAM_GENERATION_AND_TEST_TRIGGER diff --git a/.ci/magician/cloudbuild/build_trigger.go b/.ci/magician/cloudbuild/build_trigger.go deleted file mode 100644 index f776af1222cd..000000000000 --- a/.ci/magician/cloudbuild/build_trigger.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -* Copyright 2023 Google LLC. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
- */ -package cloudbuild - -import ( - "context" - "fmt" - "os" - - cloudbuildv1 "google.golang.org/api/cloudbuild/v1" -) - -func (cb *Client) TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error { - presubmitTriggerId, ok := os.LookupEnv("GENERATE_DIFFS_TRIGGER") - if !ok { - return fmt.Errorf("did not provide GENERATE_DIFFS_TRIGGER environment variable") - } - - err := triggerCloudBuildRun(PROJECT_ID, presubmitTriggerId, REPO_NAME, commitSha, substitutions) - if err != nil { - return err - } - - return nil -} - -func triggerCloudBuildRun(projectId, triggerId, repoName, commitSha string, substitutions map[string]string) error { - ctx := context.Background() - c, err := cloudbuildv1.NewService(ctx) - if err != nil { - return fmt.Errorf("failed to create Cloud Build service client: %s", err) - } - - repoSource := &cloudbuildv1.RepoSource{ - ProjectId: projectId, - RepoName: repoName, - CommitSha: commitSha, - Substitutions: substitutions, - } - - _, err = c.Projects.Triggers.Run(projectId, triggerId, repoSource).Do() - if err != nil { - return fmt.Errorf("failed to create Cloud Build run: %s", err) - } - - fmt.Println("Started Cloud Build Run: ", triggerId) - return nil -} diff --git a/.ci/magician/cloudbuild/community.go b/.ci/magician/cloudbuild/community.go index 5b352975c0e9..9c01d7c94f01 100644 --- a/.ci/magician/cloudbuild/community.go +++ b/.ci/magician/cloudbuild/community.go @@ -23,7 +23,7 @@ import ( cloudbuildv1 "google.golang.org/api/cloudbuild/v1" ) -func (cb *Client) ApproveCommunityChecker(prNumber, commitSha string) error { +func (cb *Client) ApproveDownstreamGenAndTest(prNumber, commitSha string) error { buildId, err := getPendingBuildId(PROJECT_ID, commitSha) if err != nil { return err @@ -42,9 +42,9 @@ func (cb *Client) ApproveCommunityChecker(prNumber, commitSha string) error { } func getPendingBuildId(projectId, commitSha string) (string, error) { - COMMUNITY_CHECKER_TRIGGER, ok := 
os.LookupEnv("COMMUNITY_CHECKER_TRIGGER") + COMMUNITY_CHECKER_TRIGGER, ok := os.LookupEnv("DOWNSTREAM_GENERATION_AND_TEST_TRIGGER") if !ok { - return "", fmt.Errorf("Did not provide COMMUNITY_CHECKER_TRIGGER environment variable") + return "", fmt.Errorf("Did not provide DOWNSTREAM_GENERATION_AND_TEST_TRIGGER environment variable") } ctx := context.Background() diff --git a/.ci/magician/cmd/community_checker.go b/.ci/magician/cmd/community_checker.go deleted file mode 100644 index f86aabdfba58..000000000000 --- a/.ci/magician/cmd/community_checker.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -* Copyright 2023 Google LLC. All Rights Reserved. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. - */ -package cmd - -import ( - "fmt" - "magician/cloudbuild" - "magician/github" - - "github.com/spf13/cobra" -) - -// communityApprovalCmd represents the communityApproval command -var communityApprovalCmd = &cobra.Command{ - Use: "community-checker", - Short: "Run presubmit generate diffs for untrusted users and remove awaiting-approval label", - Long: `This command processes pull requests and performs various validations and actions based on the PR's metadata and author. - - The following PR details are expected as arguments: - 1. PR Number - 2. Commit SHA - 3. Branch Name - 4. Head Repo URL - 5. Head Branch - 6. Base Branch - - The command performs the following steps: - 1. Trigger cloud presubmits with specific substitutions for the PR. - 2. 
Remove the 'awaiting-approval' label from the PR. - `, - RunE: func(cmd *cobra.Command, args []string) error { - prNumber := args[0] - fmt.Println("PR Number: ", prNumber) - - commitSha := args[1] - fmt.Println("Commit SHA: ", commitSha) - - branchName := args[2] - fmt.Println("Branch Name: ", branchName) - - headRepoUrl := args[3] - fmt.Println("Head Repo URL: ", headRepoUrl) - - headBranch := args[4] - fmt.Println("Head Branch: ", headBranch) - - baseBranch := args[5] - fmt.Println("Base Branch: ", baseBranch) - - githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_MAGIC_MODULES") - if !ok { - return fmt.Errorf("did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") - } - gh := github.NewClient(githubToken) - cb := cloudbuild.NewClient() - return execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch, gh, cb) - }, -} - -func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh GithubClient, cb CloudbuildClient) error { - substitutions := map[string]string{ - "BRANCH_NAME": branchName, - "_PR_NUMBER": prNumber, - "_HEAD_REPO_URL": headRepoUrl, - "_HEAD_BRANCH": headBranch, - "_BASE_BRANCH": baseBranch, - } - - // trigger presubmit builds - community-checker requires approval - // (explicitly or via membership-checker) - err := cb.TriggerMMPresubmitRuns(commitSha, substitutions) - if err != nil { - return err - } - - // in community-checker job: - // remove awaiting-approval label from external contributor PRs - gh.RemoveLabel(prNumber, "awaiting-approval") - return nil -} - -func init() { - rootCmd.AddCommand(communityApprovalCmd) -} diff --git a/.ci/magician/cmd/community_checker_test.go b/.ci/magician/cmd/community_checker_test.go deleted file mode 100644 index 9880d83031a3..000000000000 --- a/.ci/magician/cmd/community_checker_test.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -* Copyright 2023 Google LLC. All Rights Reserved. 
-* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. - */ -package cmd - -import ( - "magician/github" - "reflect" - "testing" -) - -func TestExecCommunityChecker_CoreContributorFlow(t *testing.T) { - gh := &mockGithub{ - pullRequest: github.PullRequest{ - User: github.User{ - Login: "core_author", - }, - }, - userType: github.CoreContributorUserType, - calledMethods: make(map[string][][]any), - } - cb := &mockCloudBuild{ - calledMethods: make(map[string][][]any), - } - - execCommunityChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) - - method := "TriggerMMPresubmitRuns" - expected := [][]any{{"sha1", map[string]string{"BRANCH_NAME": "branch1", "_BASE_BRANCH": "base1", "_HEAD_BRANCH": "head1", "_HEAD_REPO_URL": "url1", "_PR_NUMBER": "pr1"}}} - if calls, ok := cb.calledMethods[method]; !ok { - t.Fatal("Presubmit runs not triggered for core contributor") - } else if !reflect.DeepEqual(calls, expected) { - t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) - } - - method = "RemoveLabel" - expected = [][]any{{"pr1", "awaiting-approval"}} - if calls, ok := gh.calledMethods[method]; !ok { - t.Fatal("awaiting-approval label not removed for PR ") - } else if !reflect.DeepEqual(calls, expected) { - t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) - } - -} - -func TestExecCommunityChecker_GooglerFlow(t *testing.T) { - gh := &mockGithub{ - pullRequest: github.PullRequest{ - User: github.User{ - Login: 
"googler_author", - }, - }, - userType: github.GooglerUserType, - calledMethods: make(map[string][][]any), - requestedReviewers: []github.User{github.User{Login: "reviewer1"}}, - previousReviewers: []github.User{github.User{Login: github.GetRandomReviewer()}, github.User{Login: "reviewer3"}}, - } - cb := &mockCloudBuild{ - calledMethods: make(map[string][][]any), - } - - execCommunityChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) - - method := "TriggerMMPresubmitRuns" - expected := [][]any{{"sha1", map[string]string{"BRANCH_NAME": "branch1", "_BASE_BRANCH": "base1", "_HEAD_BRANCH": "head1", "_HEAD_REPO_URL": "url1", "_PR_NUMBER": "pr1"}}} - if calls, ok := cb.calledMethods[method]; !ok { - t.Fatal("Presubmit runs not triggered for googler") - } else if !reflect.DeepEqual(calls, expected) { - t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) - } - - method = "RemoveLabel" - expected = [][]any{{"pr1", "awaiting-approval"}} - if calls, ok := gh.calledMethods[method]; !ok { - t.Fatal("awaiting-approval label not removed for PR ") - } else if !reflect.DeepEqual(calls, expected) { - t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) - } -} - -func TestExecCommunityChecker_AmbiguousUserFlow(t *testing.T) { - gh := &mockGithub{ - pullRequest: github.PullRequest{ - User: github.User{ - Login: "ambiguous_author", - }, - }, - userType: github.CommunityUserType, - calledMethods: make(map[string][][]any), - requestedReviewers: []github.User{github.User{Login: github.GetRandomReviewer()}}, - previousReviewers: []github.User{github.User{Login: github.GetRandomReviewer()}, github.User{Login: "reviewer3"}}, - } - cb := &mockCloudBuild{ - calledMethods: make(map[string][][]any), - } - - execCommunityChecker("pr1", "sha1", "branch1", "url1", "head1", "base1", gh, cb) - - method := "TriggerMMPresubmitRuns" - expected := [][]any{{"sha1", map[string]string{"BRANCH_NAME": "branch1", "_BASE_BRANCH": "base1", 
"_HEAD_BRANCH": "head1", "_HEAD_REPO_URL": "url1", "_PR_NUMBER": "pr1"}}} - if calls, ok := cb.calledMethods[method]; !ok { - t.Fatal("Presubmit runs not triggered for ambiguous user") - } else if !reflect.DeepEqual(calls, expected) { - t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) - } - - method = "RemoveLabel" - expected = [][]any{{"pr1", "awaiting-approval"}} - if calls, ok := gh.calledMethods[method]; !ok { - t.Fatal("awaiting-approval label not removed for PR ") - } else if !reflect.DeepEqual(calls, expected) { - t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) - } -} diff --git a/.ci/magician/cmd/interfaces.go b/.ci/magician/cmd/interfaces.go index 948ab794c907..b7007b89e934 100644 --- a/.ci/magician/cmd/interfaces.go +++ b/.ci/magician/cmd/interfaces.go @@ -36,8 +36,7 @@ type GithubClient interface { } type CloudbuildClient interface { - ApproveCommunityChecker(prNumber, commitSha string) error - TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error + ApproveDownstreamGenAndTest(prNumber, commitSha string) error } type ExecRunner interface { diff --git a/.ci/magician/cmd/membership_checker.go b/.ci/magician/cmd/membership_checker.go index 7e149a122dad..0faa0fef8b1a 100644 --- a/.ci/magician/cmd/membership_checker.go +++ b/.ci/magician/cmd/membership_checker.go @@ -74,7 +74,7 @@ func execMembershipChecker(prNumber, commitSha string, gh GithubClient, cb Cloud // 1. auto approve community-checker run for trusted users // 2. 
add awaiting-approval label to external contributor PRs if trusted { - err = cb.ApproveCommunityChecker(prNumber, commitSha) + err = cb.ApproveDownstreamGenAndTest(prNumber, commitSha) if err != nil { return err } diff --git a/.ci/magician/cmd/membership_checker_test.go b/.ci/magician/cmd/membership_checker_test.go index d13ae2c3df0c..c32c05cff319 100644 --- a/.ci/magician/cmd/membership_checker_test.go +++ b/.ci/magician/cmd/membership_checker_test.go @@ -37,7 +37,7 @@ func TestExecMembershipChecker_CoreContributorFlow(t *testing.T) { execMembershipChecker("pr1", "sha1", gh, cb) - method := "ApproveCommunityChecker" + method := "ApproveDownstreamGenAndTest" expected := [][]any{{"pr1", "sha1"}} if calls, ok := cb.calledMethods[method]; !ok { t.Fatal("Community checker not approved for core author") @@ -65,7 +65,7 @@ func TestExecMembershipChecker_GooglerFlow(t *testing.T) { execMembershipChecker("pr1", "sha1", gh, cb) - method := "ApproveCommunityChecker" + method := "ApproveDownstreamGenAndTest" expected := [][]any{{"pr1", "sha1"}} if calls, ok := cb.calledMethods[method]; !ok { t.Fatal("Community checker not approved for googler") @@ -100,7 +100,7 @@ func TestExecMembershipChecker_AmbiguousUserFlow(t *testing.T) { t.Fatalf("Wrong calls for %s, got %v, expected %v", method, calls, expected) } - if _, ok := gh.calledMethods["ApproveCommunityChecker"]; ok { + if _, ok := gh.calledMethods["ApproveDownstreamGenAndTest"]; ok { t.Fatal("Incorrectly approved community checker for ambiguous user") } } diff --git a/.ci/magician/cmd/mock_cloudbuild_test.go b/.ci/magician/cmd/mock_cloudbuild_test.go index d95ae3f3ffea..37a5ab399e95 100644 --- a/.ci/magician/cmd/mock_cloudbuild_test.go +++ b/.ci/magician/cmd/mock_cloudbuild_test.go @@ -19,12 +19,7 @@ type mockCloudBuild struct { calledMethods map[string][][]any } -func (m *mockCloudBuild) ApproveCommunityChecker(prNumber, commitSha string) error { - m.calledMethods["ApproveCommunityChecker"] = 
append(m.calledMethods["ApproveCommunityChecker"], []any{prNumber, commitSha}) - return nil -} - -func (m *mockCloudBuild) TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error { - m.calledMethods["TriggerMMPresubmitRuns"] = append(m.calledMethods["TriggerMMPresubmitRuns"], []any{commitSha, substitutions}) +func (m *mockCloudBuild) ApproveDownstreamGenAndTest(prNumber, commitSha string) error { + m.calledMethods["ApproveDownstreamGenAndTest"] = append(m.calledMethods["ApproveDownstreamGenAndTest"], []any{prNumber, commitSha}) return nil } From d58136eae0f919c9b0383b73d0a3bc5b32e08eae Mon Sep 17 00:00:00 2001 From: himanikh Date: Fri, 6 Sep 2024 08:22:44 -0700 Subject: [PATCH 49/60] memorystore open api (#11463) --- mmv1/products/memorystore/Instance.yaml | 343 ++++++++++++++++++ mmv1/products/memorystore/product.yaml | 10 + .../decoders/memorystore_instance.go.erb | 42 +++ .../encoders/memorystore_instance.go.erb | 27 ++ .../memorystore_instance_basic.tf.erb | 48 +++ .../examples/memorystore_instance_full.tf.erb | 70 ++++ ...emorystore_instance_persistence_aof.tf.erb | 53 +++ .../components/inputs/services_beta.kt | 5 + .../components/inputs/services_ga.kt | 5 + .../resource_memorystore_instance_test.go.erb | 327 +++++++++++++++++ 10 files changed, 930 insertions(+) create mode 100644 mmv1/products/memorystore/Instance.yaml create mode 100644 mmv1/products/memorystore/product.yaml create mode 100644 mmv1/templates/terraform/decoders/memorystore_instance.go.erb create mode 100644 mmv1/templates/terraform/encoders/memorystore_instance.go.erb create mode 100644 mmv1/templates/terraform/examples/memorystore_instance_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/memorystore_instance_full.tf.erb create mode 100644 mmv1/templates/terraform/examples/memorystore_instance_persistence_aof.tf.erb create mode 100644 mmv1/third_party/terraform/services/memorystore/resource_memorystore_instance_test.go.erb diff --git 
a/mmv1/products/memorystore/Instance.yaml b/mmv1/products/memorystore/Instance.yaml new file mode 100644 index 000000000000..cda74778ad22 --- /dev/null +++ b/mmv1/products/memorystore/Instance.yaml @@ -0,0 +1,343 @@ +--- +!ruby/object:Api::Resource +base_url: "projects/{{project}}/locations/{{location}}/instances" +create_url: "projects/{{project}}/locations/{{location}}/instances?instanceId={{instance_id}}" +self_link: "projects/{{project}}/locations/{{location}}/instances/{{instance_id}}" +id_format: "projects/{{project}}/locations/{{location}}/instances/{{instance_id}}" +import_format: + - "projects/{{project}}/locations/{{location}}/instances/{{instance_id}}" +name: Instance +description: A Google Cloud Memorystore instance. +update_verb: :PATCH +update_mask: true +autogen_async: true +min_version: beta +timeouts: !ruby/object:Api::Timeouts + insert_minutes: 60 + update_minutes: 120 + delete_minutes: 30 +custom_code: !ruby/object:Provider::Terraform::CustomCode + encoder: templates/terraform/encoders/memorystore_instance.go.erb + decoder: templates/terraform/decoders/memorystore_instance.go.erb +examples: + - !ruby/object:Provider::Terraform::Examples + name: "memorystore_instance_basic" + primary_resource_id: "instance-basic" + min_version: beta + vars: + instance_name: "basic-instance" + policy_name: "my-policy" + subnet_name: "my-subnet" + network_name: "my-network" + prevent_destroy: "true" + test_vars_overrides: + prevent_destroy: "false" + oics_vars_overrides: + prevent_destroy: "false" + - !ruby/object:Provider::Terraform::Examples + name: "memorystore_instance_full" + primary_resource_id: "instance-full" + min_version: beta + vars: + instance_name: "full-instance" + policy_name: "my-policy" + subnet_name: "my-subnet" + network_name: "my-network" + prevent_destroy: "true" + test_vars_overrides: + prevent_destroy: "false" + oics_vars_overrides: + prevent_destroy: "false" + - !ruby/object:Provider::Terraform::Examples + name: 
"memorystore_instance_persistence_aof" + primary_resource_id: "instance-persistence-aof" + min_version: beta + vars: + instance_name: "aof-instance" + policy_name: "my-policy" + subnet_name: "my-subnet" + network_name: "my-network" + prevent_destroy: "true" + test_vars_overrides: + prevent_destroy: "false" + oics_vars_overrides: + prevent_destroy: "false" +properties: + - !ruby/object:Api::Type::String + name: name + description: "Identifier. Unique name of the instance.\nFormat: projects/{project}/locations/{location}/instances/{instance} " + output: true + - !ruby/object:Api::Type::String + name: createTime + description: "Output only. Creation timestamp of the instance. " + output: true + - !ruby/object:Api::Type::String + name: updateTime + description: "Output only. Latest update timestamp of the instance. " + output: true + - !ruby/object:Api::Type::KeyValueLabels + name: labels + description: "Optional. Labels to represent user-provided metadata. " + - !ruby/object:Api::Type::String + name: state + description: + "Output only. Current state of the instance. \n Possible values:\n + CREATING\nACTIVE\nUPDATING\nDELETING" + output: true + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: targetShardCount + description: "Output only. Target number of shards for the instance. " + output: true + - !ruby/object:Api::Type::Integer + name: targetReplicaCount + description: + "Output only. Target number of replica nodes per shard for the + instance. " + output: true + name: updateInfo + description: "Represents information about instance with state UPDATING. " + output: true + name: stateInfo + description: "Additional information about the state of the instance. " + output: true + - !ruby/object:Api::Type::String + name: uid + description: "Output only. System assigned, unique identifier for the instance. 
" + output: true + - !ruby/object:Api::Type::Integer + name: replicaCount + description: + "Optional. Number of replica nodes per shard. If omitted the default + is 0 replicas. " + default_from_api: true + - !ruby/object:Api::Type::String + name: authorizationMode + description: + "Optional. Immutable. Authorization mode of the instance. \n Possible + values:\n AUTH_DISABLED\nIAM_AUTH" + immutable: true + default_from_api: true + - !ruby/object:Api::Type::String + name: transitEncryptionMode + description: + "Optional. Immutable. In-transit encryption mode of the instance. \n + Possible values:\n TRANSIT_ENCRYPTION_DISABLED\nSERVER_AUTHENTICATION" + immutable: true + default_from_api: true + - !ruby/object:Api::Type::Integer + name: shardCount + description: "Required. Number of shards for the instance. " + required: true + - !ruby/object:Api::Type::Array + name: discoveryEndpoints + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: address + description: + "Output only. IP address of the exposed endpoint clients connect + to. " + output: true + - !ruby/object:Api::Type::Integer + name: port + description: "Output only. The port number of the exposed endpoint. " + output: true + - !ruby/object:Api::Type::String + name: network + description: + "Output only. The network where the IP address of the discovery + endpoint will be\nreserved, in the form of\nprojects/{network_project}/global/networks/{network_id}. " + output: true + description: + "Output only. Endpoints clients can connect to the instance through. + Currently only one\ndiscovery endpoint is supported. " + output: true + - !ruby/object:Api::Type::String + name: nodeType + description: + "Optional. Immutable. Machine type for individual nodes of the instance. 
+ \n Possible values:\n SHARED_CORE_NANO\nHIGHMEM_MEDIUM\nHIGHMEM_XLARGE\nSTANDARD_SMALL" + immutable: true + default_from_api: true + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Enum + name: mode + description: "Optional. Current persistence mode. \n Possible values:\nDISABLED\nRDB\nAOF" + values: + - :DISABLED + - :RDB + - :AOF + default_from_api: true + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: rdbSnapshotPeriod + default_from_api: true + description: + "Optional. Period between RDB snapshots. \n Possible values:\n + ONE_HOUR\nSIX_HOURS\nTWELVE_HOURS\nTWENTY_FOUR_HOURS" + - !ruby/object:Api::Type::String + name: rdbSnapshotStartTime + description: + "Optional. Time that the first snapshot was/will be attempted, + and to which future\nsnapshots will be aligned. If not provided, the current + time will be\nused. " + default_from_api: true + name: rdbConfig + description: "Configuration for RDB based persistence. " + default_from_api: true + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: appendFsync + description: "Optional. The fsync mode. \n Possible values:\n NEVER\nEVERY_SEC\nALWAYS" + default_from_api: true + name: aofConfig + description: "Configuration for AOF based persistence. " + default_from_api: true + name: persistenceConfig + default_from_api: true + description: "Represents persistence configuration for a instance. " + - !ruby/object:Api::Type::String + name: engineVersion + description: "Optional. Immutable. Engine version of the instance. " + immutable: true + default_from_api: true + - !ruby/object:Api::Type::KeyValuePairs + name: engineConfigs + description: "Optional. User-provided engine configurations for the instance. " + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Double + name: sizeGb + description: "Output only. Memory size in GB of the node. 
" + output: true + name: nodeConfig + output: true + description: "Represents configuration for nodes of the instance. " + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: zone + description: + "Optional. Defines zone where all resources will be allocated with + SINGLE_ZONE mode.\nIgnored for MULTI_ZONE mode. " + immutable: true + - !ruby/object:Api::Type::Enum + name: mode + values: + - :MULTI_ZONE + - :SINGLE_ZONE + default_from_api: true + description: + "Optional. Current zone distribution mode. Defaults to MULTI_ZONE. + \n Possible values:\n MULTI_ZONE\nSINGLE_ZONE" + name: zoneDistributionConfig + immutable: true + default_from_api: true + description: "Zone distribution configuration for allocation of instance resources. " + - !ruby/object:Api::Type::Boolean + name: deletionProtectionEnabled + description: "Optional. If set to true deletion of the instance will fail. " + default_value: true + - !ruby/object:Api::Type::Array + name: pscAutoConnections + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: pscConnectionId + description: + "Output only. The PSC connection id of the forwarding rule connected + to the\nservice attachment. " + output: true + - !ruby/object:Api::Type::String + name: ipAddress + description: + "Output only. The IP allocated on the consumer network for the + PSC forwarding rule. " + output: true + - !ruby/object:Api::Type::String + name: forwardingRule + description: "Output only. The URI of the consumer side forwarding rule.\nFormat:\nprojects/{project}/regions/{region}/forwardingRules/{forwarding_rule} " + output: true + - !ruby/object:Api::Type::String + name: projectId + description: + "Output only. The consumer project_id where the forwarding rule is + created from. " + output: true + - !ruby/object:Api::Type::String + name: network + description: + "Output only. 
The consumer network where the IP address resides, in + the form of\nprojects/{project_id}/global/networks/{network_id}. " + output: true + description: + "Output only. User inputs and resource details of the auto-created + PSC connections. " + output: true +parameters: + - !ruby/object:Api::Type::String + name: location + description: + "Resource ID segment making up resource `name`. It identifies the resource + within its parent collection as described in https://google.aip.dev/122. See documentation + for resource type `memorystore.googleapis.com/CertificateAuthority`. " + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: instanceId + description: + "Required. The ID to use for the instance, which will become the final + component of\nthe instance's resource name.\n\nThis value is subject to the following + restrictions:\n\n* Must be 4-63 characters in length\n* Must begin with a letter + or digit\n* Must contain only lowercase letters, digits, and hyphens\n* Must not + end with a hyphen\n* Must be unique within a location " + url_param_only: true + required: true + immutable: true +virtual_fields: + - !ruby/object:Api::Type::Array + name: desired_psc_auto_connections + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: project_id + description: + "Required. The consumer project_id where the forwarding rule is + created from. " + required: true + - !ruby/object:Api::Type::String + name: network + description: + "Required. The consumer network where the IP address resides, in + the form of\nprojects/{project_id}/global/networks/{network_id}. " + required: true + description: "Required. Immutable. User inputs for the auto-created + PSC connections. 
" + immutable: true + required: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: name + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: + result: !ruby/object:Api::OpAsync::Result + path: response + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: done + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: error + message: message diff --git a/mmv1/products/memorystore/product.yaml b/mmv1/products/memorystore/product.yaml new file mode 100644 index 000000000000..c0adf8a0fcde --- /dev/null +++ b/mmv1/products/memorystore/product.yaml @@ -0,0 +1,10 @@ +--- +!ruby/object:Api::Product +versions: + - !ruby/object:Api::Product::Version + base_url: https://memorystore.googleapis.com/v1beta/ + name: beta +name: Memorystore +display_name: Memorystore +scopes: + - https://www.googleapis.com/auth/cloud-platform diff --git a/mmv1/templates/terraform/decoders/memorystore_instance.go.erb b/mmv1/templates/terraform/decoders/memorystore_instance.go.erb new file mode 100644 index 000000000000..96061ee6d183 --- /dev/null +++ b/mmv1/templates/terraform/decoders/memorystore_instance.go.erb @@ -0,0 +1,42 @@ +// Retrieve pscAutoConnections from API response +v, ok := res["pscAutoConnections"] +if !ok { + return nil, fmt.Errorf("pscAutoConnections field not found in API response") +} + +connections, ok := v.([]interface{}) +if !ok { + return nil, fmt.Errorf("pscAutoConnections is not an array") +} + +transformed := make([]interface{}, 0, len(connections)) +uniqueConnections := make(map[string]bool) // Track unique project+network combos + +for _, raw := range connections { + connectionData, ok := raw.(map[string]interface{}) + if !ok || len(connectionData) < 1 { + return nil, fmt.Errorf("Invalid or empty psc connection data: %v", raw) + } + + projectID, ok := connectionData["projectId"].(string) + if !ok { + return nil, fmt.Errorf("invalid project ID in psc 
connection: %v", connectionData) + } + + networkID, ok := connectionData["network"].(string) + if !ok { + return nil, fmt.Errorf("invalid network ID in psc connection: %v", connectionData) + } + + uniqueKey := projectID + networkID + if !uniqueConnections[uniqueKey] { // Check for uniqueness + uniqueConnections[uniqueKey] = true + transformed = append(transformed, map[string]interface{}{ + "project_id": projectID, + "network": networkID, + }) + } +} + +d.Set("desired_psc_auto_connections", transformed) +return res, nil diff --git a/mmv1/templates/terraform/encoders/memorystore_instance.go.erb b/mmv1/templates/terraform/encoders/memorystore_instance.go.erb new file mode 100644 index 000000000000..b6ab1daf78a5 --- /dev/null +++ b/mmv1/templates/terraform/encoders/memorystore_instance.go.erb @@ -0,0 +1,27 @@ +v, ok := d.GetOk("desired_psc_auto_connections") +if !ok { + return obj, nil // No desired connections, nothing to update +} +l := v.([]interface{}) +req := make([]interface{}, 0, len(l)) +for _, raw := range l { + if raw == nil { + continue + } + desiredConnection := raw.(map[string]interface{}) + connectionReq := make(map[string]interface{}) + + projectId := desiredConnection["project_id"] + if val := reflect.ValueOf(projectId); val.IsValid() && !tpgresource.IsEmptyValue(val) { + connectionReq["projectId"] = projectId + } + + network := desiredConnection["network"] + if val := reflect.ValueOf(network); val.IsValid() && !tpgresource.IsEmptyValue(val) { + connectionReq["network"] = network + } + + req = append(req, connectionReq) +} +obj["pscAutoConnections"] = req +return obj, nil diff --git a/mmv1/templates/terraform/examples/memorystore_instance_basic.tf.erb b/mmv1/templates/terraform/examples/memorystore_instance_basic.tf.erb new file mode 100644 index 000000000000..8d020bf4448a --- /dev/null +++ b/mmv1/templates/terraform/examples/memorystore_instance_basic.tf.erb @@ -0,0 +1,48 @@ +resource "google_memorystore_instance" "<%= ctx[:primary_resource_id] %>" { + 
provider = google-beta + instance_id = "<%= ctx[:vars]['instance_name'] %>" + shard_count = 3 + desired_psc_auto_connections { + network = google_compute_network.producer_net.id + project_id = data.google_project.project.project_id + } + location = "us-central1" + deletion_protection_enabled = false + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] + + lifecycle { + prevent_destroy = "<%= ctx[:vars]['prevent_destroy'] %>" + } +} + +resource "google_network_connectivity_service_connection_policy" "default" { + provider = google-beta + name = "<%= ctx[:vars]['policy_name'] %>" + location = "us-central1" + service_class = "gcp-memorystore" + description = "my basic service connection policy" + network = google_compute_network.producer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.producer_subnet.id] + } +} + +resource "google_compute_subnetwork" "producer_subnet" { + provider = google-beta + name = "<%= ctx[:vars]['subnet_name'] %>" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_network" "producer_net" { + provider = google-beta + name = "<%= ctx[:vars]['network_name'] %>" + auto_create_subnetworks = false +} + +data "google_project" "project" { + provider = google-beta +} diff --git a/mmv1/templates/terraform/examples/memorystore_instance_full.tf.erb b/mmv1/templates/terraform/examples/memorystore_instance_full.tf.erb new file mode 100644 index 000000000000..59cbc24c408a --- /dev/null +++ b/mmv1/templates/terraform/examples/memorystore_instance_full.tf.erb @@ -0,0 +1,70 @@ +resource "google_memorystore_instance" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + instance_id = "<%= ctx[:vars]['instance_name'] %>" + shard_count = 3 + desired_psc_auto_connections { + network = google_compute_network.producer_net.id + project_id = data.google_project.project.project_id + } + location = "us-central1" + 
replica_count = 2 + node_type = "SHARED_CORE_NANO" + transit_encryption_mode = "TRANSIT_ENCRYPTION_DISABLED" + authorization_mode = "AUTH_DISABLED" + engine_configs = { + maxmemory-policy = "volatile-ttl" + } + zone_distribution_config { + mode = "SINGLE_ZONE" + zone = "us-central1-b" + } + engine_version = "VALKEY_7_2" + deletion_protection_enabled = false + persistence_config { + mode = "RDB" + rdb_config { + rdb_snapshot_period = "ONE_HOUR" + rdb_snapshot_start_time = "2024-10-02T15:01:23Z" + } + } + labels = { + "abc" : "xyz" + } + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] + + lifecycle { + prevent_destroy = "<%= ctx[:vars]['prevent_destroy'] %>" + } +} + +resource "google_network_connectivity_service_connection_policy" "default" { + provider = google-beta + name = "<%= ctx[:vars]['policy_name'] %>" + location = "us-central1" + service_class = "gcp-memorystore" + description = "my basic service connection policy" + network = google_compute_network.producer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.producer_subnet.id] + } +} + +resource "google_compute_subnetwork" "producer_subnet" { + provider = google-beta + name = "<%= ctx[:vars]['subnet_name'] %>" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_network" "producer_net" { + provider = google-beta + name = "<%= ctx[:vars]['network_name'] %>" + auto_create_subnetworks = false +} + +data "google_project" "project" { + provider = google-beta +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/memorystore_instance_persistence_aof.tf.erb b/mmv1/templates/terraform/examples/memorystore_instance_persistence_aof.tf.erb new file mode 100644 index 000000000000..6a1e9894c995 --- /dev/null +++ b/mmv1/templates/terraform/examples/memorystore_instance_persistence_aof.tf.erb @@ -0,0 +1,53 @@ +resource "google_memorystore_instance" "<%= 
ctx[:primary_resource_id] %>" { + provider = google-beta + instance_id = "<%= ctx[:vars]['instance_name'] %>" + shard_count = 3 + desired_psc_auto_connections { + network = google_compute_network.producer_net.id + project_id = data.google_project.project.project_id + } + location = "us-central1" + persistence_config { + mode = "AOF" + aof_config { + append_fsync = "EVERY_SEC" + } + } + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] + deletion_protection_enabled = false + lifecycle { + prevent_destroy = "<%= ctx[:vars]['prevent_destroy'] %>" + } +} + +resource "google_network_connectivity_service_connection_policy" "default" { + provider = google-beta + name = "<%= ctx[:vars]['policy_name'] %>" + location = "us-central1" + service_class = "gcp-memorystore" + description = "my basic service connection policy" + network = google_compute_network.producer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.producer_subnet.id] + } +} + +resource "google_compute_subnetwork" "producer_subnet" { + provider = google-beta + name = "<%= ctx[:vars]['subnet_name'] %>" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_network" "producer_net" { + provider = google-beta + name = "<%= ctx[:vars]['network_name'] %>" + auto_create_subnetworks = false +} + +data "google_project" "project" { + provider = google-beta +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt index a6c8565d1780..85f975181dac 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt @@ -491,6 +491,11 @@ var ServicesListBeta = mapOf( "displayName" to "Memcache", "path" to "./google-beta/services/memcache" ), + "memorystore" to mapOf( + 
"name" to "memorystore", + "displayName" to "Memorystore", + "path" to "./google-beta/services/memorystore" + ), "migrationcenter" to mapOf( "name" to "migrationcenter", "displayName" to "Migrationcenter", diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt index bc928ad729c1..05ada2da4b20 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt @@ -486,6 +486,11 @@ var ServicesListGa = mapOf( "displayName" to "Memcache", "path" to "./google/services/memcache" ), + "memorystore" to mapOf( + "name" to "memorystore", + "displayName" to "Memorystore", + "path" to "./google/services/memorystore" + ), "migrationcenter" to mapOf( "name" to "migrationcenter", "displayName" to "Migrationcenter", diff --git a/mmv1/third_party/terraform/services/memorystore/resource_memorystore_instance_test.go.erb b/mmv1/third_party/terraform/services/memorystore/resource_memorystore_instance_test.go.erb new file mode 100644 index 000000000000..5857a242c589 --- /dev/null +++ b/mmv1/third_party/terraform/services/memorystore/resource_memorystore_instance_test.go.erb @@ -0,0 +1,327 @@ +<% autogen_exception -%> +package memorystore_test +<% unless version == "ga" -%> + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +// Validate that replica count is updated for the instance +func TestAccMemorystoreInstance_updateReplicaCount(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckMemorystoreInstanceDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + // create instance with replica count 1 + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE", deletionProtectionEnabled: false}), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // update replica count to 2 + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 2, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE", deletionProtectionEnabled: false}), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // clean up the resource + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 2, shardCount: 3, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE", deletionProtectionEnabled: false}), + }, + }, + }) +} + +// Validate that shard count is updated for the cluster +func TestAccMemorystoreInstance_updateShardCount(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckMemorystoreInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with shard count 3 + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE", deletionProtectionEnabled: false}), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // update shard count to 5 + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE", 
deletionProtectionEnabled: false}), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // clean up the resource + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE", deletionProtectionEnabled: false}), + }, + }, + }) +} + +// Validate that engineConfigs is updated for the cluster +func TestAccMemorystoreInstance_updateRedisConfigs(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckMemorystoreInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster + Config: createOrUpdateMemorystoreInstance(&InstanceParams{ + name: name, + shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", + engineConfigs: map[string]string{ + "maxmemory-policy": "volatile-ttl", + }, + deletionProtectionEnabled: false}), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // add a new redis config key-value pair and update existing redis config + Config: createOrUpdateMemorystoreInstance(&InstanceParams{ + name: name, + shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", + engineConfigs: map[string]string{ + "maxmemory-policy": "allkeys-lru", + "maxmemory-clients": "90%", + }, + deletionProtectionEnabled: false}), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // remove all redis configs + Config: createOrUpdateMemorystoreInstance(&InstanceParams{ + name: name, + shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", + engineConfigs: map[string]string{ + "maxmemory-policy": "allkeys-lru", + "maxmemory-clients": "90%", + }, + 
deletionProtectionEnabled: false}), + }, + }, + }) +} + +// Validate that deletion protection is updated for the cluster +func TestAccMemorystoreInstance_updateDeletionProtection(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckMemorystoreInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with deletion protection true + Config: createOrUpdateMemorystoreInstance(&InstanceParams{ + name: name, + shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", + deletionProtectionEnabled: true, + }), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // update cluster with deletion protection false + Config: createOrUpdateMemorystoreInstance(&InstanceParams{ + name: name, + shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", + deletionProtectionEnabled: false, + }), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// Validate that persistence config is updated for the cluster +func TestAccMemorystoreInstance_updatePersistence(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckMemorystoreInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create instance with AOF enabled + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE", persistenceMode: "AOF", deletionProtectionEnabled: false}), + }, + { + ResourceName: 
"google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // update persitence to RDB + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE", persistenceMode: "RDB", deletionProtectionEnabled: false}), + }, + { + ResourceName: "google_memorystore_instance.test", + ImportState: true, + ImportStateVerify: true, + }, + { + // clean up the resource + Config: createOrUpdateMemorystoreInstance(&InstanceParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE", persistenceMode: "RDB", deletionProtectionEnabled: false}), + }, + }, + }) +} + +type InstanceParams struct { + name string + replicaCount int + shardCount int + preventDestroy bool + nodeType string + engineConfigs map[string]string + zoneDistributionMode string + zone string + deletionProtectionEnabled bool + persistenceMode string +} + +func createOrUpdateMemorystoreInstance(params *InstanceParams) string { + lifecycleBlock := "" + if params.preventDestroy { + lifecycleBlock = ` + lifecycle { + prevent_destroy = true + }` + } + var strBuilder strings.Builder + for key, value := range params.engineConfigs { + strBuilder.WriteString(fmt.Sprintf("%s = \"%s\"\n", key, value)) + } + + zoneDistributionConfigBlock := `` + if params.zoneDistributionMode != "" { + zoneDistributionConfigBlock = fmt.Sprintf(` + zone_distribution_config { + mode = "%s" + zone = "%s" + } + `, params.zoneDistributionMode, params.zone) + } + persistenceBlock := `` + if params.persistenceMode != "" { + persistenceBlock = fmt.Sprintf(` + persistence_config { + mode = "%s" + } + `, params.persistenceMode) + } + return fmt.Sprintf(` +resource "google_memorystore_instance" "test" { + provider = google-beta + instance_id = "%s" + replica_count = %d + shard_count = %d + node_type = "%s" + location = "europe-west1" + desired_psc_auto_connections { + network = 
google_compute_network.producer_net.id + project_id = data.google_project.project.project_id + } + deletion_protection_enabled = %t + engine_configs = { + %s + } + %s + %s + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] + %s +} + +resource "google_network_connectivity_service_connection_policy" "default" { + provider = google-beta + name = "%s" + location = "europe-west1" + service_class = "gcp-memorystore" + description = "my basic service connection policy" + network = google_compute_network.producer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.producer_subnet.id] + } +} + +resource "google_compute_subnetwork" "producer_subnet" { + provider = google-beta + name = "%s" + ip_cidr_range = "10.0.0.248/29" + region = "europe-west1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_network" "producer_net" { + provider = google-beta + name = "%s" + auto_create_subnetworks = false +} + +data "google_project" "project" { + provider = google-beta +} +`, params.name, params.replicaCount, params.shardCount, params.nodeType, params.deletionProtectionEnabled, strBuilder.String(), zoneDistributionConfigBlock, persistenceBlock, lifecycleBlock, params.name, params.name, params.name) +} + +<% end -%> From ea614874795283c33d234e32219c5274a3f13ffa Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Fri, 6 Sep 2024 09:05:22 -0700 Subject: [PATCH 50/60] Bootstrap storage pool in tests (#11598) --- .../terraform/acctest/bootstrap_test_utils.go | 72 ++++++++++++++++ .../compute/resource_compute_disk_test.go.erb | 82 +------------------ .../resource_compute_instance_test.go.erb | 53 +----------- 3 files changed, 76 insertions(+), 131 deletions(-) diff --git a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go index 0a4869b32dd6..807dea324306 100644 --- 
a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go +++ b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go @@ -1107,6 +1107,78 @@ func BootstrapFirewallForDataprocSharedNetwork(t *testing.T, firewallName string return firewall.Name } +const SharedStoragePoolPrefix = "tf-bootstrap-storage-pool-" + +func BootstrapComputeStoragePool(t *testing.T, storagePoolName, storagePoolType string) string { + projectID := envvar.GetTestProjectFromEnv() + zone := envvar.GetTestZoneFromEnv() + + storagePoolName = SharedStoragePoolPrefix + storagePoolType + "-" + storagePoolName + + config := BootstrapConfig(t) + if config == nil { + t.Fatal("Could not bootstrap config.") + } + + computeService := config.NewComputeClient(config.UserAgent) + if computeService == nil { + t.Fatal("Could not create compute client.") + } + + _, err := computeService.StoragePools.Get(projectID, zone, storagePoolName).Do() + if err != nil && transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG] Storage pool %q not found, bootstrapping", storagePoolName) + + url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools", config.ComputeBasePath, projectID, zone) + storagePoolTypeUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePoolTypes/%s", projectID, zone, storagePoolType) + + storagePoolObj := map[string]interface{}{ + "name": storagePoolName, + "poolProvisionedCapacityGb": 10240, + "poolProvisionedThroughput": 180, + "storagePoolType": storagePoolTypeUrl, + "capacityProvisioningType": "ADVANCED", + } + + if storagePoolType == "hyperdisk-balanced" { + storagePoolObj["poolProvisionedIops"] = 10000 + storagePoolObj["poolProvisionedThroughput"] = 1024 + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: projectID, + RawURL: url, + UserAgent: config.UserAgent, + Body: storagePoolObj, + Timeout: 20 * time.Minute, + }) + + log.Printf("Response is, %s", res) + if err != nil { + t.Fatalf("Error bootstrapping 
storage pool %s: %s", storagePoolName, err) + } + + log.Printf("[DEBUG] Waiting for storage pool creation to finish") + err = tpgcompute.ComputeOperationWaitTime(config, res, projectID, "Error bootstrapping storage pool", config.UserAgent, 4*time.Minute) + if err != nil { + t.Fatalf("Error bootstrapping test storage pool %s: %s", storagePoolName, err) + } + } + + storagePool, err := computeService.StoragePools.Get(projectID, zone, storagePoolName).Do() + + if storagePool == nil { + t.Fatalf("Error getting storage pool %s: is nil", storagePoolName) + } + + if err != nil { + t.Fatalf("Error getting storage pool %s: %s", storagePoolName, err) + } + return storagePool.SelfLink +} + func SetupProjectsAndGetAccessToken(org, billing, pid, service string, config *transport_tpg.Config) (string, error) { // Create project-1 and project-2 rmService := config.NewResourceManagerClient(config.UserAgent) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb index 9a9633c1c1b5..b57ae8920f58 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb @@ -3,17 +3,14 @@ package compute_test import ( "fmt" - "net/http" "os" "testing" - "time" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" <% if version == "ga" -%> "google.golang.org/api/compute/v1" @@ -1615,12 +1612,9 @@ resource "google_compute_disk" "foobar" { } func TestAccComputeDisk_storagePoolSpecified(t *testing.T) { - // Currently failing 
- acctest.SkipIfVcr(t) t.Parallel() - storagePoolName := fmt.Sprintf("tf-test-storage-pool-%s", acctest.RandString(t, 10)) - storagePoolUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePools/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), storagePoolName) + storagePoolNameLong := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-throughput") diskName := fmt.Sprintf("tf-test-disk-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ @@ -1628,11 +1622,7 @@ func TestAccComputeDisk_storagePoolSpecified(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - PreConfig: setupTestingStoragePool(t, storagePoolName), - Config: testAccComputeDisk_storagePoolSpecified(diskName, storagePoolUrl), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_compute_disk.foobar", "storage_pool", storagePoolName), - ), + Config: testAccComputeDisk_storagePoolSpecified(diskName, storagePoolNameLong), }, { ResourceName: "google_compute_disk.foobar", @@ -1641,74 +1631,6 @@ func TestAccComputeDisk_storagePoolSpecified(t *testing.T) { }, }, }) - - cleanupTestingStoragePool(t, storagePoolName) -} - -func setupTestingStoragePool(t *testing.T, storagePoolName string) func() { - return func() { - config := acctest.GoogleProviderConfig(t) - headers := make(http.Header) - project := envvar.GetTestProjectFromEnv() - zone := envvar.GetTestZoneFromEnv() - url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools", config.ComputeBasePath, project, zone) - storagePoolTypeUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePoolTypes/hyperdisk-throughput", project, zone) - defaultTimeout := 20 * time.Minute - obj := make(map[string]interface{}) - obj["name"] = storagePoolName - obj["poolProvisionedCapacityGb"] = 10240 - obj["poolProvisionedThroughput"] = 180 - obj["storagePoolType"] = storagePoolTypeUrl - obj["capacityProvisioningType"] = "ADVANCED" - - res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "POST", - Project: project, - RawURL: url, - UserAgent: config.UserAgent, - Body: obj, - Timeout: defaultTimeout, - Headers: headers, - }) - if err != nil { - t.Errorf("Error creating StoragePool: %s", err) - } - - err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Creating StoragePool", config.UserAgent, defaultTimeout) - if err != nil { - t.Errorf("Error waiting to create StoragePool: %s", err) - } - } -} - -func cleanupTestingStoragePool(t *testing.T, storagePoolName string) { - config := acctest.GoogleProviderConfig(t) - headers := make(http.Header) - project := envvar.GetTestProjectFromEnv() - zone := envvar.GetTestZoneFromEnv() - url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools/%s", config.ComputeBasePath, project, zone, storagePoolName) - defaultTimeout := 20 * time.Minute - var obj map[string]interface{} - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "DELETE", - Project: project, - RawURL: url, - UserAgent: config.UserAgent, - Body: obj, - Timeout: defaultTimeout, - Headers: headers, - }) - if err != nil { - t.Errorf("Error deleting StoragePool: %s", err) - } - - err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Deleting StoragePool", config.UserAgent, defaultTimeout) - if err != nil { - t.Errorf("Error waiting to delete StoragePool: %s", err) - } } func testAccComputeDisk_storagePoolSpecified(diskName, storagePoolUrl string) string { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index 7b7ce8bd535a..1a85326f6392 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -9,7 +9,6 @@ import ( <% unless version == 'ga' -%> 
"google.golang.org/api/googleapi" <% end -%> - "net/http" "reflect" "regexp" "sort" @@ -25,7 +24,6 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "github.com/hashicorp/terraform-provider-google/google/tpgresource" <% if version == "ga" -%> @@ -10259,24 +10257,17 @@ resource "google_compute_instance" "foobar" { } func TestAccComputeInstance_bootDisk_storagePoolSpecified(t *testing.T) { - // Currently failing - acctest.SkipIfVcr(t) t.Parallel() instanceName := fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) - storagePoolName := fmt.Sprintf("tf-test-storage-pool-%s", acctest.RandString(t, 10)) - storagePoolUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePools/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), storagePoolName) + storagePoolNameLong := acctest.BootstrapComputeStoragePool(t, "basic-1", "hyperdisk-balanced") acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - PreConfig: setupTestingStoragePool_HyperdiskBalanced(t, storagePoolName), - Config: testAccComputeInstance_bootDisk_storagePoolSpecified(instanceName, storagePoolUrl, envvar.GetTestZoneFromEnv()), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("google_compute_instance.foobar", "boot_disk.0.initialize_params.0.storage_pool", storagePoolName), - ), + Config: testAccComputeInstance_bootDisk_storagePoolSpecified(instanceName, storagePoolNameLong, envvar.GetTestZoneFromEnv()), }, { ResourceName: "google_compute_instance.foobar", @@ -10285,46 +10276,6 @@ func TestAccComputeInstance_bootDisk_storagePoolSpecified(t *testing.T) { }, }, }) - - cleanupTestingStoragePool(t, 
storagePoolName) -} - -func setupTestingStoragePool_HyperdiskBalanced(t *testing.T, storagePoolName string) func() { - return func() { - config := acctest.GoogleProviderConfig(t) - headers := make(http.Header) - project := envvar.GetTestProjectFromEnv() - zone := envvar.GetTestZoneFromEnv() - url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools", config.ComputeBasePath, project, zone) - storagePoolTypeUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", project, zone) - defaultTimeout := 20 * time.Minute - obj := make(map[string]interface{}) - obj["name"] = storagePoolName - obj["poolProvisionedCapacityGb"] = 10240 - obj["poolProvisionedIops"] = 10000 - obj["poolProvisionedThroughput"] = 1024 - obj["storagePoolType"] = storagePoolTypeUrl - obj["capacityProvisioningType"] = "ADVANCED" - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "POST", - Project: project, - RawURL: url, - UserAgent: config.UserAgent, - Body: obj, - Timeout: defaultTimeout, - Headers: headers, - }) - if err != nil { - t.Errorf("Error creating StoragePool: %s", err) - } - - err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Creating StoragePool", config.UserAgent, defaultTimeout) - if err != nil { - t.Errorf("Error waiting to create StoragePool: %s", err) - } - } } func testAccComputeInstance_bootDisk_storagePoolSpecified(instanceName, storagePoolUrl, zone string) string { From a13ca6505fa3c36cab0ab4550332c76a8592ad96 Mon Sep 17 00:00:00 2001 From: karolgorc Date: Fri, 6 Sep 2024 18:21:39 +0200 Subject: [PATCH 51/60] Fix an edge-case in forceNewIfNetworkIPNotUpdatable function (#11653) --- .../terraform/services/compute/resource_compute_instance.go.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 
6e6de85288d1..cfcfda8bb61c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -151,7 +151,7 @@ func forceNewIfNetworkIPNotUpdatableFunc(d tpgresource.TerraformResourceDiff) er oldS, newS := d.GetChange(subnetworkKey) subnetworkProjectKey := prefix + ".subnetwork_project" networkIPKey := prefix + ".network_ip" - if d.HasChange(networkIPKey) { + if d.HasChange(networkIPKey) && d.Get(networkIPKey).(string) != "" { if tpgresource.CompareSelfLinkOrResourceName("", oldS.(string), newS.(string), nil) && !d.HasChange(subnetworkProjectKey) && tpgresource.CompareSelfLinkOrResourceName("", oldN.(string), newN.(string), nil) { if err := d.ForceNew(networkIPKey); err != nil { return err From d17814cf2e547c0b1010a46e76ea130822f412ac Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 6 Sep 2024 18:48:42 +0100 Subject: [PATCH 52/60] Dataproc Cluster: add more nil handling following API-side change (#11592) --- .../terraform/services/dataproc/resource_dataproc_cluster.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go index 8535f48d4833..66e53985a69c 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go @@ -2891,6 +2891,9 @@ func flattenSecurityConfig(d *schema.ResourceData, sc *dataproc.SecurityConfig) } func flattenKerberosConfig(d *schema.ResourceData, kfg *dataproc.KerberosConfig) []map[string]interface{} { + if kfg == nil { + return nil + } data := map[string]interface{}{ "enable_kerberos": kfg.EnableKerberos, "root_principal_password_uri": kfg.RootPrincipalPasswordUri, From 6231f4d544bcad3612ba6834c211f6e29c8f0cb4 Mon Sep 17 00:00:00 2001 
From: Anubhav Sharma <67965197+anubhavsharma515@users.noreply.github.com> Date: Fri, 6 Sep 2024 14:32:53 -0400 Subject: [PATCH 53/60] Feat: Add new files for datasources google_bigquery_tables (#11552) --- .../provider/provider_mmv1_resources.go.erb | 1 + .../data_source_google_bigquery_tables.go | 147 ++++++++++++++++++ ...data_source_google_bigquery_tables_test.go | 66 ++++++++ .../docs/d/bigquery_tables.html.markdown | 41 +++++ 4 files changed, 255 insertions(+) create mode 100644 mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables.go create mode 100644 mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/bigquery_tables.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 283b39390f99..1a7908b56144 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -42,6 +42,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_beyondcorp_app_connector": beyondcorp.DataSourceGoogleBeyondcorpAppConnector(), "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), "google_billing_account": billing.DataSourceGoogleBillingAccount(), + "google_bigquery_tables": bigquery.DataSourceGoogleBigQueryTables(), "google_bigquery_dataset": bigquery.DataSourceGoogleBigqueryDataset(), "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), "google_certificate_manager_certificates": certificatemanager.DataSourceGoogleCertificateManagerCertificates(), diff --git a/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables.go b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables.go new file mode 100644 index 
000000000000..7ad5f274b117 --- /dev/null +++ b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables.go @@ -0,0 +1,147 @@ +package bigquery + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleBigQueryTables() *schema.Resource { + + dsSchema := map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID of the dataset containing the tables.", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the project in which the dataset is located. If it is not provided, the provider project is used.", + }, + "tables": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "table_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + } + + return &schema.Resource{ + Read: DataSourceGoogleBigQueryTablesRead, + Schema: dsSchema, + } +} + +func DataSourceGoogleBigQueryTablesRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + + project, err := tpgresource.GetProject(d, config) + + if err != nil { + return fmt.Errorf("Error fetching project: %s", err) + } + + params := make(map[string]string) + tables := make([]map[string]interface{}, 0) + + for { + + url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/tables") + if err != nil { + return err + } + + url, err = transport_tpg.AddQueryParams(url, params) + if 
err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error retrieving tables: %s", err) + } + + pageTables := flattenDataSourceGoogleBigQueryTablesList(res["tables"]) + tables = append(tables, pageTables...) + + pToken, ok := res["nextPageToken"] + if ok && pToken != nil && pToken.(string) != "" { + params["pageToken"] = pToken.(string) + } else { + break + } + } + + if err := d.Set("tables", tables); err != nil { + return fmt.Errorf("Error retrieving tables: %s", err) + } + + id := fmt.Sprintf("projects/%s/datasets/%s/tables", project, datasetID) + d.SetId(id) + + return nil +} + +func flattenDataSourceGoogleBigQueryTablesList(res interface{}) []map[string]interface{} { + + if res == nil { + return make([]map[string]interface{}, 0) + } + + ls := res.([]interface{}) + + tables := make([]map[string]interface{}, 0, len(ls)) + + for _, raw := range ls { + output := raw.(map[string]interface{}) + + var mLabels map[string]interface{} + var mTableName string + + if oLabels, ok := output["labels"].(map[string]interface{}); ok { + mLabels = oLabels + } else { + mLabels = make(map[string]interface{}) // Initialize as an empty map if labels are missing + } + + if oTableReference, ok := output["tableReference"].(map[string]interface{}); ok { + if tableID, ok := oTableReference["tableId"].(string); ok { + mTableName = tableID + } + } + tables = append(tables, map[string]interface{}{ + "labels": mLabels, + "table_id": mTableName, + }) + } + + return tables +} diff --git a/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables_test.go b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables_test.go new file mode 100644 index 000000000000..cd21b8ad6b7b --- /dev/null +++ b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_tables_test.go @@ 
-0,0 +1,66 @@ +package bigquery_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceGoogleBigqueryTables_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleBigqueryTables_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.google_bigquery_tables.example", "tables.#", "1"), + resource.TestCheckResourceAttr("data.google_bigquery_tables.example", "tables.0.table_id", fmt.Sprintf("tf_test_table_%s", context["random_suffix"])), + resource.TestCheckResourceAttr("data.google_bigquery_tables.example", "tables.0.labels.%", "1"), + resource.TestCheckResourceAttr("data.google_bigquery_tables.example", "tables.0.labels.goog-terraform-provisioned", "true"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleBigqueryTables_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_bigquery_dataset" "test" { + dataset_id = "tf_test_ds_%{random_suffix}" + friendly_name = "testing" + description = "This is a test description" + location = "US" + default_table_expiration_ms = 3600000 + } + + resource "google_bigquery_table" "test" { + dataset_id = google_bigquery_dataset.test.dataset_id + table_id = "tf_test_table_%{random_suffix}" + deletion_protection = false + schema = <The `tables` block supports: + +* `labels` - User-provided table labels, in key/value pairs. +* `table_id` - The name of the table. 
+ From 5cb12b8ff36eaba7646549498492b2133c96713f Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Fri, 6 Sep 2024 11:34:15 -0700 Subject: [PATCH 54/60] Cleanup Go rewrite TODOs and remove unused fields (#11648) --- mmv1/api/async.go | 52 -- mmv1/api/object.go | 85 ---- mmv1/api/product.go | 39 +- mmv1/api/product/version.go | 23 +- mmv1/api/resource.go | 78 +-- mmv1/api/resource/custom_code.go | 2 - mmv1/api/resource/docs.go | 7 - mmv1/api/resource/examples.go | 43 -- mmv1/api/resource/iam_policy.go | 2 - mmv1/api/resource/nested_query.go | 2 - mmv1/api/resource/reference_links.go | 10 +- mmv1/api/resource/sweeper.go | 1 - mmv1/api/resource/validation.go | 1 - mmv1/api/resource_test.go | 24 +- mmv1/api/timeouts.go | 10 +- mmv1/api/type.go | 472 +++--------------- mmv1/api/type_test.go | 56 +-- mmv1/google/string_utils.go | 12 +- mmv1/google/yaml_validator.go | 126 ----- mmv1/main.go | 14 +- .../products/cloudquotas/QuotaPreference.yaml | 1 - .../cloudquotas/go_QuotaPreference.yaml | 1 - mmv1/provider/template_data.go | 86 +--- mmv1/provider/terraform.go | 221 -------- mmv1/templates/terraform/operation.go.tmpl | 4 +- mmv1/templates/terraform/resource.go.tmpl | 4 - 26 files changed, 128 insertions(+), 1248 deletions(-) delete mode 100644 mmv1/api/object.go diff --git a/mmv1/api/async.go b/mmv1/api/async.go index 0905853e80a1..a689259ddad5 100644 --- a/mmv1/api/async.go +++ b/mmv1/api/async.go @@ -23,9 +23,6 @@ import ( // Base class from which other Async classes can inherit. 
type Async struct { - // Embed YamlValidator object - // google.YamlValidator - // Describes an operation Operation *Operation @@ -40,7 +37,6 @@ type Async struct { PollAsync `yaml:",inline"` } -// def allow?(method) func (a Async) Allow(method string) bool { return slices.Contains(a.Actions, strings.ToLower(method)) } @@ -56,9 +52,7 @@ type Operation struct { OpAsyncOperation `yaml:",inline"` } -// def initialize(path, base_url, wait_ms, timeouts) func NewOperation() *Operation { - // super() op := new(Operation) op.Timeouts = NewTimeouts() return op @@ -86,14 +80,6 @@ type OpAsync struct { IncludeProject bool `yaml:"include_project"` } -// def initialize(operation, result, status, error) -// super() -// @operation = operation -// @result = result -// @status = status -// @error = error -// end - type OpAsyncOperation struct { Kind string @@ -107,19 +93,6 @@ type OpAsyncOperation struct { FullUrl string `yaml:"full_url"` } -// def validate -// super - -// check :kind, type: String -// check :path, type: String -// check :base_url, type: String -// check :wait_ms, type: Integer - -// check :full_url, type: String - -// conflicts %i[base_url full_url] -// end - // Represents the results of an Operation request type OpAsyncResult struct { ResourceInsideResponse bool `yaml:"resource_inside_response"` @@ -127,17 +100,9 @@ type OpAsyncResult struct { Path string } -// def initialize(path = nil, resource_inside_response = nil) -// super() -// @path = path -// @resource_inside_response = resource_inside_response -// end - // Provides information to parse the result response to check operation // status type OpAsyncStatus struct { - // google.YamlValidator - Path string Complete bool @@ -145,13 +110,6 @@ type OpAsyncStatus struct { Allowed []bool } -// def initialize(path, complete, allowed) -// super() -// @path = path -// @complete = complete -// @allowed = allowed -// end - // Provides information on how to retrieve errors of the executed operations type OpAsyncError 
struct { google.YamlValidator @@ -161,12 +119,6 @@ type OpAsyncError struct { Message string } -// def initialize(path, message) -// super() -// @path = path -// @message = message -// end - // Async implementation for polling in Terraform type PollAsync struct { // Details how to poll for an eventually-consistent resource state. @@ -179,10 +131,6 @@ type PollAsync struct { // deleting a resource CheckResponseFuncAbsence string `yaml:"check_response_func_absence"` - // Custom code to get a poll response, if needed. - // Will default to same logic as Read() to get current resource - CustomPollRead string `yaml:"custom_poll_read"` - // If true, will suppress errors from polling and default to the // result of the final Read() SuppressError bool `yaml:"suppress_error"` diff --git a/mmv1/api/object.go b/mmv1/api/object.go deleted file mode 100644 index 7c1c8f8a821b..000000000000 --- a/mmv1/api/object.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2024 Google Inc. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package api - -// require 'google/extensions' -// require 'google/logger' -// require 'google/yaml_validator' - -// Represents an object that has a (mandatory) name -type NamedObject struct { - // google.YamlValidator - - Name string - - // original value of :name before the provider override happens - // same as :name if not overridden in provider - ApiName string `yaml:"api_name"` -} - -// func (n *Named) string_array(arr) { -// types = arr.map(&:class).uniq -// types.size == 1 && types[0] == String -// } - -// func (n *Named) deep_merge(arr1, arr2) { -// if arr1.nil? -// return arr2 -// end -// if arr2.nil? -// return arr1 -// end - -// // Scopes is an array of standard strings. In which case return the -// // version in the overrides. This allows scopes to be removed rather -// // than allowing for a merge of the two arrays -// if string_array?(arr1) -// return arr2 -// end - -// // Merge any elements that exist in both -// result = arr1.map do |el1| -// other = arr2.select { |el2| el1.name == el2.name }.first -// other.nil? ? el1 : el1.merge(other) -// end - -// // Add any elements of arr2 that don't exist in arr1 -// result + arr2.reject do |el2| -// arr1.any? 
{ |el1| el2.name == el1.name } -// end -// } - -// func (n *Named) merge(other) { -// result = self.class.new -// instance_variables.each do |v| -// result.instance_variable_set(v, instance_variable_get(v)) -// end - -// other.instance_variables.each do |v| -// if other.instance_variable_get(v).instance_of?(Array) -// result.instance_variable_set(v, deep_merge(result.instance_variable_get(v), -// other.instance_variable_get(v))) -// else -// result.instance_variable_set(v, other.instance_variable_get(v)) -// end -// end - -// result -// } - -// func (n *Named) validate() { -// super -// check :name, type: String, required: true -// check :api_name, type: String, default: @name -// } diff --git a/mmv1/api/product.go b/mmv1/api/product.go index f157c057d802..ae357d7da75c 100644 --- a/mmv1/api/product.go +++ b/mmv1/api/product.go @@ -25,14 +25,15 @@ import ( // Represents a product to be managed type Product struct { - NamedObject `yaml:",inline"` - - // Inherited: // The name of the product's API capitalised in the appropriate places. 
// This isn't just the API name because it doesn't meaningfully separate // words in the api name - "accesscontextmanager" vs "AccessContextManager" // Example inputs: "Compute", "AccessContextManager" - // Name string + Name string + + // original value of :name before the provider override happens + // same as :name if not overridden in provider + ApiName string `yaml:"api_name"` // Display Name: The full name of the GCP product; eg "Cloud Bigtable" DisplayName string `yaml:"display_name"` @@ -41,23 +42,19 @@ type Product struct { // The list of permission scopes available for the service // For example: `https://www.googleapis.com/auth/compute` - Scopes []string // The API versions of this product - Versions []*product.Version // The base URL for the service API endpoint // For example: `https://www.googleapis.com/compute/v1/` - BaseUrl string `yaml:"base_url"` // A function reference designed for the rare case where you // need to use retries in operation calls. Used for the service api // as it enables itself (self referential) and can result in occasional // failures on operation_get. see github.com/hashicorp/terraform-provider-google/issues/9489 - OperationRetry string `yaml:"operation_retry"` Async *Async @@ -82,6 +79,10 @@ func (p *Product) UnmarshalYAML(unmarshal func(any) error) error { } func (p *Product) Validate() { + if len(p.Name) == 0 { + log.Fatalf("Missing `name` for product") + } + // product names must start with a capital for i, ch := range p.Name { if !unicode.IsUpper(ch) { @@ -224,31 +225,9 @@ func (p *Product) TerraformName() string { // Debugging Methods // ==================== -// def to_s -// // relies on the custom to_json definitions -// JSON.pretty_generate(self) -// end - // Prints a dot notation path to where the field is nested within the parent // object when called on a property. eg: parent.meta.label.foo // Redefined on Product to terminate the calls up the parent chain. 
func (p Product) Lineage() string { return p.Name } - -// def to_json(opts = nil) -// json_out = {} - -// instance_variables.each do |v| -// if v == :@objects -// json_out['@resources'] = objects.to_h { |o| [o.name, o] } -// elsif instance_variable_get(v) == false || instance_variable_get(v).nil? -// // ignore false or missing because omitting them cleans up result -// // and both are the effective defaults of their types -// else -// json_out[v] = instance_variable_get(v) -// end -// end - -// JSON.generate(json_out, opts) -// end diff --git a/mmv1/api/product/version.go b/mmv1/api/product/version.go index c6f3b14f884e..16ef54ae51b0 100644 --- a/mmv1/api/product/version.go +++ b/mmv1/api/product/version.go @@ -19,8 +19,6 @@ import ( "golang.org/x/exp/slices" ) -// require 'api/object' - var ORDER = []string{"ga", "beta", "alpha", "private"} // A version of the API for a given product / API group @@ -28,18 +26,9 @@ var ORDER = []string{"ga", "beta", "alpha", "private"} // a superset of beta, and beta a superset of GA. Each version will have a // different version url. type Version struct { - // TODO: Should embed NamedObject or not? 
- // < Api::NamedObject - // include Comparable - - // attr_reader CaiBaseUrl string `yaml:"cai_base_url"` - - // attr_accessor - BaseUrl string `yaml:"base_url"` - - // attr_accessor - Name string + BaseUrl string `yaml:"base_url"` + Name string } func (v *Version) Validate(pName string) { @@ -51,14 +40,6 @@ func (v *Version) Validate(pName string) { } } -// def to_s -// "//{name}: //{base_url}" -// end - -// def <=>(other) -// ORDER.index(name) <=> ORDER.index(other.name) if other.is_a?(Version) -// end - func (v *Version) CompareTo(other *Version) int { return slices.Index(ORDER, v.Name) - slices.Index(ORDER, other.Name) } diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 5406d75b33fb..eeb0355130ee 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -27,8 +27,11 @@ import ( ) type Resource struct { - // Embed NamedObject - NamedObject `yaml:",inline"` + Name string + + // original value of :name before the provider override happens + // same as :name if not overridden in provider + ApiName string `yaml:"api_name"` // [Required] A description of the resource that's surfaced in provider // documentation. @@ -355,6 +358,10 @@ func (r *Resource) SetDefault(product *Product) { } func (r *Resource) Validate() { + if r.Name == "" { + log.Fatalf("Missing `name` for resource") + } + if r.NestedQuery != nil && r.NestedQuery.IsListOfIds && len(r.Identity) != 1 { log.Fatalf("`is_list_of_ids: true` implies resource has exactly one `identity` property") } @@ -426,9 +433,6 @@ func (r *Resource) Validate() { // Returns all properties and parameters including the ones that are // excluded. 
This is used for PropertyOverride validation - -// TODO: remove the ruby function name -// def all_properties func (r Resource) AllProperties() []*Type { return google.Concat(r.Properties, r.Parameters) } @@ -439,19 +443,16 @@ func (r Resource) AllPropertiesInVersion() []*Type { }) } -// def properties_with_excluded func (r Resource) PropertiesWithExcluded() []*Type { return r.Properties } -// def properties func (r Resource) UserProperites() []*Type { return google.Reject(r.Properties, func(p *Type) bool { return p.Exclude }) } -// def parameters func (r Resource) UserParameters() []*Type { return google.Reject(r.Parameters, func(p *Type) bool { return p.Exclude @@ -461,20 +462,16 @@ func (r Resource) UserParameters() []*Type { // Return the user-facing properties in client tools; this ends up meaning // both properties and parameters but without any that are excluded due to // version mismatches or manual exclusion - -// def all_user_properties func (r Resource) AllUserProperties() []*Type { return google.Concat(r.UserProperites(), r.UserParameters()) } -// def required_properties func (r Resource) RequiredProperties() []*Type { return google.Select(r.AllUserProperties(), func(p *Type) bool { return p.Required }) } -// def all_nested_properties(props) func (r Resource) AllNestedProperties(props []*Type) []*Type { nested := props for _, prop := range props { @@ -486,7 +483,6 @@ func (r Resource) AllNestedProperties(props []*Type) []*Type { return nested } -// sensitive_props func (r Resource) SensitiveProps() []*Type { props := r.AllNestedProperties(r.RootProperties()) return google.Select(props, func(p *Type) bool { @@ -508,8 +504,6 @@ func (r Resource) SensitivePropsToString() string { // Fingerprints aren't *really" settable properties, but they behave like one. // At Create, they have no value but they can just be read in anyways, and after a Read // they will need to be set in every Update. 
- -// def settable_properties func (r Resource) SettableProperties() []*Type { props := make([]*Type, 0) @@ -539,8 +533,6 @@ func (r Resource) UnorderedListProperties() []*Type { } // Properties that will be returned in the API body - -// def gettable_properties func (r Resource) GettableProperties() []*Type { return google.Reject(r.AllUserProperties(), func(v *Type) bool { return v.UrlParamOnly @@ -549,8 +541,6 @@ func (r Resource) GettableProperties() []*Type { // Returns the list of top-level properties once any nested objects with flatten_object // set to true have been collapsed - -// def root_properties func (r Resource) RootProperties() []*Type { props := make([]*Type, 0) @@ -566,8 +556,6 @@ func (r Resource) RootProperties() []*Type { // Return the product-level async object, or the resource-specific one // if one exists. - -// def async func (r Resource) GetAsync() *Async { if r.Async != nil { return r.Async @@ -578,8 +566,6 @@ func (r Resource) GetAsync() *Async { // Return the resource-specific identity properties, or a best guess of the // `name` value for the resource. 
- -// def identity func (r Resource) GetIdentity() []*Type { props := r.AllUserProperties() @@ -600,7 +586,6 @@ func (r Resource) GetIdentity() []*Type { }) } -// def add_labels_related_fields(props, parent) func (r *Resource) AddLabelsRelatedFields(props []*Type, parent *Type) []*Type { for _, p := range props { if p.IsA("KeyValueLabels") { @@ -614,7 +599,6 @@ func (r *Resource) AddLabelsRelatedFields(props []*Type, parent *Type) []*Type { return props } -// def add_labels_fields(props, parent, labels) func (r *Resource) addLabelsFields(props []*Type, parent *Type, labels *Type) []*Type { if parent == nil || parent.FlattenObject { if r.SkipAttributionLabel { @@ -650,7 +634,6 @@ func (r *Resource) HasLabelsField() bool { return false } -// def add_annotations_fields(props, parent, annotations) func (r *Resource) addAnnotationsFields(props []*Type, parent *Type, annotations *Type) []*Type { // The effective_annotations field is used to write to API, @@ -669,7 +652,6 @@ func (r *Resource) addAnnotationsFields(props []*Type, parent *Type, annotations return props } -// def build_effective_labels_field(name, labels) func buildEffectiveLabelsField(name string, labels *Type) *Type { description := fmt.Sprintf("All of %s (key/value pairs) present on the resource in GCP, "+ "including the %s configured through Terraform, other clients and services.", name, name) @@ -690,7 +672,6 @@ func buildEffectiveLabelsField(name string, labels *Type) *Type { return NewProperty(n, name, options) } -// def build_terraform_labels_field(name, parent, labels) func buildTerraformLabelsField(name string, parent *Type, labels *Type) *Type { description := fmt.Sprintf("The combination of %s configured directly on the resource\n"+ " and default %s configured on the provider.", name, name) @@ -714,8 +695,7 @@ func buildTerraformLabelsField(name string, parent *Type, labels *Type) *Type { return NewProperty(n, name, options) } -// // Check if the resource has root "labels" field -// def 
root_labels? +// Check if the resource has root "labels" field func (r Resource) RootLabels() bool { for _, p := range r.RootProperties() { if p.IsA("KeyValueLabels") { @@ -725,8 +705,7 @@ func (r Resource) RootLabels() bool { return false } -// // Return labels fields that should be added to ImportStateVerifyIgnore -// def ignore_read_labels_fields(props) +// Return labels fields that should be added to ImportStateVerifyIgnore func (r Resource) IgnoreReadLabelsFields(props []*Type) []string { fields := make([]string, 0) for _, p := range props { @@ -741,7 +720,6 @@ func (r Resource) IgnoreReadLabelsFields(props []*Type) []string { return fields } -// def get_labels_field_note(title) func getLabelsFieldNote(title string) string { return fmt.Sprintf( "**Note**: This field is non-authoritative, and will only manage the %s present "+ @@ -757,8 +735,6 @@ func (r Resource) StateMigrationFile() string { // ==================== // Version-related methods // ==================== - -// def min_version func (r Resource) MinVersionObj() *product.Version { if r.MinVersion != "" { return r.ProductMetadata.versionObj(r.MinVersion) @@ -767,7 +743,6 @@ func (r Resource) MinVersionObj() *product.Version { } } -// def not_in_version?(version) func (r Resource) NotInVersion(version *product.Version) bool { return version.CompareTo(r.MinVersionObj()) < 0 } @@ -775,8 +750,6 @@ func (r Resource) NotInVersion(version *product.Version) bool { // Recurses through all nested properties and parameters and changes their // 'exclude' instance variable if the property is at a version below the // one that is passed in. - -// def exclude_if_not_in_version!(version) func (r *Resource) ExcludeIfNotInVersion(version *product.Version) { if !r.Exclude { r.Exclude = r.NotInVersion(version) @@ -804,8 +777,6 @@ func (r *Resource) ExcludeIfNotInVersion(version *product.Version) { // product.base_url + resource.base_url + '/name' // In newer resources there is much less standardisation in terms of value. 
// Generally for them though, it's the product.base_url + resource.name - -// def self_link_url func (r Resource) SelfLinkUrl() string { s := []string{r.ProductMetadata.BaseUrl, r.SelfLinkUri()} return strings.Join(s, "") @@ -814,8 +785,6 @@ func (r Resource) SelfLinkUrl() string { // Returns the partial uri / relative path of a resource. In newer resources, // this is the name. This fn is named self_link_uri for consistency, but // could otherwise be considered to be "path" - -// def self_link_uri func (r Resource) SelfLinkUri() string { // If the terms in this are not snake-cased, this will require // an override in Terraform. @@ -826,18 +795,15 @@ func (r Resource) SelfLinkUri() string { return strings.Join([]string{r.BaseUrl, "{{name}}"}, "/") } -// def collection_url func (r Resource) CollectionUrl() string { s := []string{r.ProductMetadata.BaseUrl, r.collectionUri()} return strings.Join(s, "") } -// def collection_uri func (r Resource) collectionUri() string { return r.BaseUrl } -// def create_uri func (r Resource) CreateUri() string { if r.CreateUrl != "" { return r.CreateUrl @@ -850,7 +816,6 @@ func (r Resource) CreateUri() string { return r.SelfLinkUri() } -// def update_uri func (r Resource) UpdateUri() string { if r.UpdateUrl != "" { return r.UpdateUrl @@ -859,7 +824,6 @@ func (r Resource) UpdateUri() string { return r.SelfLinkUri() } -// def delete_uri func (r Resource) DeleteUri() string { if r.DeleteUrl != "" { return r.DeleteUrl @@ -868,22 +832,18 @@ func (r Resource) DeleteUri() string { return r.SelfLinkUri() } -// def resource_name func (r Resource) ResourceName() string { return fmt.Sprintf("%s%s", r.ProductMetadata.Name, r.Name) } // Filter the properties to keep only the ones don't have custom update // method and group them by update url & verb. 
- -// def properties_without_custom_update(properties) func propertiesWithoutCustomUpdate(properties []*Type) []*Type { return google.Select(properties, func(p *Type) bool { return p.UpdateUrl == "" || p.UpdateVerb == "" || p.UpdateVerb == "NOOP" }) } -// def update_body_properties func (r Resource) UpdateBodyProperties() []*Type { updateProp := propertiesWithoutCustomUpdate(r.SettableProperties()) if r.UpdateVerb == "PATCH" { @@ -896,8 +856,6 @@ func (r Resource) UpdateBodyProperties() []*Type { // Handwritten TF Operation objects will be shaped like accessContextManager // while the Google Go Client will have a name like accesscontextmanager - -// def client_name_pascal func (r Resource) ClientNamePascal() string { clientName := r.ProductMetadata.ClientName if clientName == "" { @@ -913,8 +871,6 @@ func (r Resource) PackageName() string { // In order of preference, use TF override, // general defined timeouts, or default Timeouts - -// def timeouts func (r Resource) GetTimeouts() *Timeouts { timeoutsFiltered := r.Timeouts if timeoutsFiltered == nil { @@ -930,7 +886,6 @@ func (r Resource) GetTimeouts() *Timeouts { return timeoutsFiltered } -// def project? func (r Resource) HasProject() bool { return strings.Contains(r.BaseUrl, "{{project}}") || strings.Contains(r.CreateUrl, "{{project}}") } @@ -939,7 +894,6 @@ func (r Resource) IncludeProjectForOperation() bool { return strings.Contains(r.BaseUrl, "{{project}}") || (r.GetAsync().IsA("OpAsync") && r.GetAsync().IncludeProject) } -// def region? func (r Resource) HasRegion() bool { found := false for _, p := range r.Parameters { @@ -951,7 +905,6 @@ func (r Resource) HasRegion() bool { return found && strings.Contains(r.BaseUrl, "{{region}}") } -// def zone? 
func (r Resource) HasZone() bool { found := false for _, p := range r.Parameters { @@ -963,7 +916,8 @@ func (r Resource) HasZone() bool { return found && strings.Contains(r.BaseUrl, "{{zone}}") } -// resource functions needed for template that previously existed in terraform.go but due to how files are being inherited here it was easier to put in here +// resource functions needed for template that previously existed in terraform.go +// but due to how files are being inherited here it was easier to put in here // taken wholesale from tpgtools func (r Resource) Updatable() bool { if !r.Immutable { @@ -984,8 +938,6 @@ func (r Resource) Updatable() bool { // Prints a dot notation path to where the field is nested within the parent // object when called on a property. eg: parent.meta.label.foo // Redefined on Resource to terminate the calls up the parent chain. - -// def lineage func (r Resource) Lineage() string { return r.Name } @@ -1137,7 +1089,6 @@ func (r *Resource) SetCompiler(t string) { // Returns the id format of an object, or self_link_uri if none is explicitly defined // We prefer the long name of a resource as the id so that users can reference // resources in a standard way, and most APIs accept short name, long name or self_link -// def id_format(object) func (r Resource) GetIdFormat() string { idFormat := r.IdFormat if idFormat == "" { @@ -1150,7 +1101,6 @@ func (r Resource) GetIdFormat() string { // Template Methods // ==================== // Functions used to create slices of resource properties that could not otherwise be called from within generating templates. - func (r Resource) ReadProperties() []*Type { return google.Reject(r.GettableProperties(), func(p *Type) bool { return p.IgnoreRead @@ -1230,7 +1180,6 @@ func (r Resource) IamResourceUriStringQualifiers() string { // For example, for the url "projects/{{project}}/schemas/{{schema}}", // the identifiers are "project", "schema". 
-// def extract_identifiers(url) func (r Resource) ExtractIdentifiers(url string) []string { matches := regexp.MustCompile(`\{\{%?(\w+)\}\}`).FindAllStringSubmatch(url, -1) var result []string @@ -1554,7 +1503,6 @@ type UpdateGroup struct { FingerprintName string } -// def properties_without_custom_update(properties) func (r Resource) propertiesWithCustomUpdate(properties []*Type) []*Type { return google.Reject(properties, func(p *Type) bool { return p.UpdateUrl == "" || p.UpdateVerb == "" || p.UpdateVerb == "NOOP" || diff --git a/mmv1/api/resource/custom_code.go b/mmv1/api/resource/custom_code.go index a00d09ce5467..e45dfc8e8ca3 100644 --- a/mmv1/api/resource/custom_code.go +++ b/mmv1/api/resource/custom_code.go @@ -15,8 +15,6 @@ package resource // Inserts custom code into terraform resources. type CustomCode struct { - // google.YamlValidator - // Collection of fields allowed in the CustomCode section for // Terraform. diff --git a/mmv1/api/resource/docs.go b/mmv1/api/resource/docs.go index 2de8004d2350..1d5d76769131 100644 --- a/mmv1/api/resource/docs.go +++ b/mmv1/api/resource/docs.go @@ -15,8 +15,6 @@ package resource // Inserts custom strings into terraform resource docs. type Docs struct { - // google.YamlValidator - // All these values should be strings, which will be inserted // directly into the terraform resource documentation. The // strings should _not_ be the names of template files @@ -26,18 +24,13 @@ type Docs struct { // template. // The text will be injected at the bottom of the specified // section. 
- // attr_reader : Warning string - // attr_reader : Note string - // attr_reader : RequiredProperties string `yaml:"required_properties"` - // attr_reader : OptionalProperties string `yaml:"optional_properties"` - // attr_reader : Attributes string } diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index 8d618b7b94fb..5c82ade99b00 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -31,11 +31,6 @@ import ( // Generates configs to be shown as examples in docs and outputted as tests // from a shared template type Examples struct { - // google.YamlValidator - - // include Compile::Core - // include Google::GolangUtils - // The name of the example in lower snake_case. // Generally takes the form of the resource name followed by some detail // about the specific test. For example, "address_with_subnetwork". @@ -320,9 +315,6 @@ func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { func (e *Examples) OiCSLink() string { v := url.Values{} - // TODO Q2: Values.Encode() sorts the values by key alphabetically. This will produce - // diffs for every URL when we convert to using this function. We should sort the - // Ruby-version query alphabetically beforehand to remove these diffs. 
v.Add("cloudshell_git_repo", "https://github.com/terraform-google-modules/docs-examples.git") v.Add("cloudshell_working_dir", e.Name) v.Add("cloudshell_image", "gcr.io/cloudshell-images/cloudshell:latest") @@ -367,38 +359,3 @@ func SubstituteTestPaths(config string) string { config = strings.ReplaceAll(config, "path/to/id_rsa.pub", "test-fixtures/ssh_rsa.pub") return config } - -// func (e *Examples) validate() { -// super -// check :name, type: String, required: true -// check :primary_resource_id, type: String -// check :min_version, type: String -// check :vars, type: Hash -// check :test_env_vars, type: Hash -// check :test_vars_overrides, type: Hash -// check :ignore_read_extra, type: Array, item_type: String, default: [] -// check :primary_resource_name, type: String -// check :skip_test, type: TrueClass -// check :skip_import_test, type: TrueClass -// check :skip_docs, type: TrueClass -// check :config_path, type: String, default: "templates/terraform/examples///{name}.tf.erb" -// check :skip_vcr, type: TrueClass -// } - -// func (e *Examples) merge(other) { -// result = self.class.new -// instance_variables.each do |v| -// result.instance_variable_set(v, instance_variable_get(v)) -// end - -// other.instance_variables.each do |v| -// if other.instance_variable_get(v).instance_of?(Array) -// result.instance_variable_set(v, deep_merge(result.instance_variable_get(v), -// other.instance_variable_get(v))) -// else -// result.instance_variable_set(v, other.instance_variable_get(v)) -// end -// end - -// result -// } diff --git a/mmv1/api/resource/iam_policy.go b/mmv1/api/resource/iam_policy.go index 18708e48fd57..bd384fb7a85a 100644 --- a/mmv1/api/resource/iam_policy.go +++ b/mmv1/api/resource/iam_policy.go @@ -23,8 +23,6 @@ import ( // and accessed via their parent resource // See: https://cloud.google.com/iam/docs/overview type IamPolicy struct { - // google.YamlValidator - // boolean of if this binding should be generated Exclude bool diff --git 
a/mmv1/api/resource/nested_query.go b/mmv1/api/resource/nested_query.go index a0ebea0198e1..0523fef4a494 100644 --- a/mmv1/api/resource/nested_query.go +++ b/mmv1/api/resource/nested_query.go @@ -19,8 +19,6 @@ import "log" // a list of resources or single object within the parent. // e.g. Fine-grained resources type NestedQuery struct { - // google.YamlValidator - // A list of keys to traverse in order. // i.e. backendBucket --> cdnPolicy.signedUrlKeyNames // should be ["cdnPolicy", "signedUrlKeyNames"] diff --git a/mmv1/api/resource/reference_links.go b/mmv1/api/resource/reference_links.go index 5c4862bbaae8..472e7ef4499b 100644 --- a/mmv1/api/resource/reference_links.go +++ b/mmv1/api/resource/reference_links.go @@ -16,14 +16,10 @@ package resource // Represents a list of documentation links. type ReferenceLinks struct { // guides containing - // name: The title of the link - // value: The URL to navigate on click - - //attr_reader + // name: The title of the link + // value: The URL to navigate on click Guides map[string]string - // the url of the API guide - - //attr_reader + // the url of the API guider Api string } diff --git a/mmv1/api/resource/sweeper.go b/mmv1/api/resource/sweeper.go index ebc078c5a770..44a30f598e83 100644 --- a/mmv1/api/resource/sweeper.go +++ b/mmv1/api/resource/sweeper.go @@ -14,7 +14,6 @@ package resource type Sweeper struct { - //Google::YamlValidator // The field checked by sweeper to determine // eligibility for deletion for generated resources SweepableIdentifierField string `yaml:"sweepable_identifier_field"` diff --git a/mmv1/api/resource/validation.go b/mmv1/api/resource/validation.go index 24cbfb557758..7214827327ed 100644 --- a/mmv1/api/resource/validation.go +++ b/mmv1/api/resource/validation.go @@ -15,7 +15,6 @@ package resource // Support for schema ValidateFunc functionality. 
type Validation struct { - //Google::YamlValidator // Ensures the value matches this regex Regex string Function string diff --git a/mmv1/api/resource_test.go b/mmv1/api/resource_test.go index a9cb0c3c4a65..d52d7313a70e 100644 --- a/mmv1/api/resource_test.go +++ b/mmv1/api/resource_test.go @@ -10,9 +10,7 @@ import ( func TestResourceMinVersionObj(t *testing.T) { t.Parallel() p := Product{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", Versions: []*product.Version{ &product.Version{ Name: "beta", @@ -37,9 +35,7 @@ func TestResourceMinVersionObj(t *testing.T) { { description: "resource minVersion is empty", obj: Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ProductMetadata: &p, }, @@ -48,9 +44,7 @@ func TestResourceMinVersionObj(t *testing.T) { { description: "resource minVersion is not empty", obj: Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "beta", ProductMetadata: &p, }, @@ -76,9 +70,7 @@ func TestResourceMinVersionObj(t *testing.T) { func TestResourceNotInVersion(t *testing.T) { t.Parallel() p := Product{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", Versions: []*product.Version{ &product.Version{ Name: "beta", @@ -104,9 +96,7 @@ func TestResourceNotInVersion(t *testing.T) { { description: "ga is in version if MinVersion is empty", obj: Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ProductMetadata: &p, }, @@ -118,9 +108,7 @@ func TestResourceNotInVersion(t *testing.T) { { description: "ga is not in version if MinVersion is beta", obj: Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "beta", ProductMetadata: &p, }, diff --git a/mmv1/api/timeouts.go b/mmv1/api/timeouts.go index 93380777e660..8fe0df0aa970 100644 --- a/mmv1/api/timeouts.go +++ b/mmv1/api/timeouts.go @@ -13,7 +13,8 @@ package api -// Default timeout for all operation types is 20, the Terraform 
default (https://www.terraform.io/plugin/sdkv2/resources/retries-and-customizable-timeouts) +// Default timeout for all operation types is 20, the Terraform default +// (https://www.terraform.io/plugin/sdkv2/resources/retries-and-customizable-timeouts) // minutes. This can be overridden for each resource. const DEFAULT_INSERT_TIMEOUT_MINUTES = 20 const DEFAULT_UPDATE_TIMEOUT_MINUTES = 20 @@ -21,18 +22,11 @@ const DEFAULT_DELETE_TIMEOUT_MINUTES = 20 // Provides timeout information for the different operation types type Timeouts struct { - // google.YamlValidator InsertMinutes int `yaml:"insert_minutes"` UpdateMinutes int `yaml:"update_minutes"` DeleteMinutes int `yaml:"delete_minutes"` } -// def initialize -// super - -// validate -// end - func NewTimeouts() *Timeouts { return &Timeouts{ InsertMinutes: DEFAULT_INSERT_TIMEOUT_MINUTES, diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 0558be2cbb74..9408b41cbe38 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -26,12 +26,15 @@ import ( // Represents a property type type Type struct { - NamedObject `yaml:",inline"` + Name string - // TODO: improve the parsing of properties based on type in resource yaml files. + // original value of :name before the provider override happens + // same as :name if not overridden in provider + ApiName string `yaml:"api_name"` + + // TODO rewrite: improve the parsing of properties based on type in resource yaml files. Type string - // TODO: set a specific type intead of interface{} DefaultValue interface{} `yaml:"default_value"` Description string @@ -128,10 +131,6 @@ type Type struct { // Can only be overridden - we should never set this ourselves. NewType string - // A pattern that maps expected user input to expected API input. - // TODO: remove? 
- Pattern string - Properties []*Type EnumValues []string `yaml:"enum_values"` @@ -147,7 +146,6 @@ type Type struct { // Adds a ValidateFunc to the item schema ItemValidation resource.Validation `yaml:"item_validation"` - // __name ParentName string // ==================== @@ -226,6 +224,7 @@ type Type struct { // ==================== // KeyValuePairs Fields // ==================== + // Ignore writing the "effective_labels" and "effective_annotations" fields to API. IgnoreWrite bool `yaml:"ignore_write"` // ==================== @@ -298,6 +297,7 @@ func (t *Type) SetDefault(r *Resource) { switch { case t.IsA("Array"): + t.ItemType.Name = t.Name t.ItemType.ParentName = t.Name t.ItemType.ParentMetadata = t t.ItemType.SetDefault(r) @@ -338,6 +338,10 @@ func (t *Type) SetDefault(r *Resource) { } func (t *Type) Validate(rName string) { + if t.Name == "" { + log.Fatalf("Missing `name` for proprty with type %s in resource %s", t.Type, rName) + } + if t.Output && t.Required { log.Fatalf("Property %s cannot be output and required at the same time in resource %s.", t.Name, rName) } @@ -346,6 +350,8 @@ func (t *Type) Validate(rName string) { log.Fatalf("'default_value' and 'default_from_api' cannot be both set in resource %s", rName) } + t.validateLabelsField() + switch { case t.IsA("Array"): t.ItemType.Validate(rName) @@ -359,70 +365,21 @@ func (t *Type) Validate(rName string) { } } -// super -// check :description, type: ::String, required: true -// check :exclude, type: :boolean, default: false, required: true -// check :deprecation_message, type: ::String -// check :removed_message, type: ::String -// check :min_version, type: ::String -// check :exact_version, type: ::String -// check :output, type: :boolean -// check :required, type: :boolean -// check :send_empty_value, type: :boolean -// check :allow_empty_object, type: :boolean -// check :url_param_only, type: :boolean -// check :read_query_params, type: ::String -// check :immutable, type: :boolean - -// raise 
'Property cannot be output and required at the same time.' \ -// if @output && @required - -// check :update_verb, type: Symbol, allowed: %i[POST PUT PATCH NONE], -// default: @__resource&.update_verb - -// check :update_url, type: ::String -// check :update_id, type: ::String -// check :fingerprint_name, type: ::String -// check :pattern, type: ::String - +// TODO rewrite: add validations +// check :description, required: true +// check :update_verb, allowed: %i[POST PUT PATCH NONE], // check_default_value_property // check_conflicts // check_at_least_one_of // check_exactly_one_of // check_required_with - -// check :sensitive, type: :boolean, default: false -// check :is_set, type: :boolean, default: false -// check :default_from_api, type: :boolean, default: false -// check :unordered_list, type: :boolean, default: false -// check :schema_config_mode_attr, type: :boolean, default: false - -// // technically set as a default everywhere, but only maps will use this. -// check :key_expander, type: ::String, default: 'tpgresource.ExpandString' -// check :key_diff_suppress_func, type: ::String - -// check :diff_suppress_func, type: ::String -// check :state_func, type: ::String -// check :validation, type: Provider::Terraform::Validation -// check :set_hash_func, type: ::String - -// check :custom_flatten, type: ::String -// check :custom_expand, type: ::String - -// raise "'default_value' and 'default_from_api' cannot be both set" \ -// if @default_from_api && !@default_value.nil? -// } - -// func (t *Type) to_s() { -// JSON.pretty_generate(self) -// } +// check the allowed types for Type field +// check the allowed fields for each type, for example, KeyName is only allowed for Map // Prints a dot notation path to where the field is nested within the parent // object. eg: parent.meta.label.foo // The only intended purpose is to allow better error messages. Some objects // and at some points in the build this doesn't output a valid output. 
- -// def lineage func (t Type) Lineage() string { if t.ParentMetadata == nil { return google.Underscore(t.Name) @@ -433,7 +390,6 @@ func (t Type) Lineage() string { // Prints the access path of the field in the configration eg: metadata.0.labels // The only intended purpose is to get the value of the labes field by calling d.Get(). -// func (t *Type) terraform_lineage() { func (t Type) TerraformLineage() string { if t.ParentMetadata == nil || t.ParentMetadata.FlattenObject { return google.Underscore(t.Name) @@ -456,7 +412,6 @@ func (t Type) EnumValuesToString(quoteSeperator string, addEmpty bool) string { return strings.Join(values, ", ") } -// def titlelize_property(property) func (t Type) TitlelizeProperty() string { return google.Camelize(t.Name, "upper") } @@ -496,36 +451,7 @@ func (t Type) ResourceType() string { return path[len(path)-1] } -// func (t *Type) to_json(opts) { -// ignore fields that will contain references to parent resources and -// those which will be added later -// ignored_fields = %i[@resource @__parent @__resource @api_name @update_verb -// @__name @name @properties] -// json_out = {} - -// instance_variables.each do |v| -// if v == :@conflicts && instance_variable_get(v).empty? -// // ignore empty conflict arrays -// elsif v == :@at_least_one_of && instance_variable_get(v).empty? -// // ignore empty at_least_one_of arrays -// elsif v == :@exactly_one_of && instance_variable_get(v).empty? -// // ignore empty exactly_one_of arrays -// elsif v == :@required_with && instance_variable_get(v).empty? -// // ignore empty required_with arrays -// elsif instance_variable_get(v) == false || instance_variable_get(v).nil? -// // ignore false booleans as non-existence indicates falsey -// elsif !ignored_fields.include? v -// json_out[v] = instance_variable_get(v) -// end -// end - -// // convert properties to a hash based on name for nested readability -// json_out.merge!(properties&.map { |p| [p.name, p] }.to_h) \ -// if respond_to? 
'properties' - -// JSON.generate(json_out, opts) -// } - +// TODO rewrite: validation // func (t *Type) check_default_value_property() { // return if @default_value.nil? @@ -569,6 +495,7 @@ func (t Type) Conflicting() []string { return t.Conflicts } +// TODO rewrite: validation // Checks that all properties that needs at least one of their fields actually exist. // This currently just returns if empty, because we don't want to do the check, since // this list will have a full path for nested attributes. @@ -588,6 +515,7 @@ func (t Type) AtLeastOneOfList() []string { return t.AtLeastOneOf } +// TODO rewrite: validation // Checks that all properties that needs exactly one of their fields actually exist. // This currently just returns if empty, because we don't want to do the check, since // this list will have a full path for nested attributes. @@ -607,6 +535,7 @@ func (t Type) ExactlyOneOfList() []string { return t.ExactlyOneOf } +// TODO rewrite: validation // Checks that all properties that needs required with their fields actually exist. // This currently just returns if empty, because we don't want to do the check, since // this list will have a full path for nested attributes. @@ -617,7 +546,6 @@ func (t Type) ExactlyOneOfList() []string { // } // Returns list of properties that needs required with their fields set. 
-// func (t *Type) required_with_list() { func (t Type) RequiredWithList() []string { if t.ResourceMetadata == nil { return []string{} @@ -630,7 +558,6 @@ func (t Type) Parent() *Type { return t.ParentMetadata } -// def min_version func (t Type) MinVersionObj() *product.Version { if t.MinVersion != "" { return t.ResourceMetadata.ProductMetadata.versionObj(t.MinVersion) @@ -639,7 +566,6 @@ func (t Type) MinVersionObj() *product.Version { } } -// def exact_version func (t *Type) exactVersionObj() *product.Version { if t.ExactVersion == "" { return nil @@ -648,7 +574,6 @@ func (t *Type) exactVersionObj() *product.Version { return t.ResourceMetadata.ProductMetadata.versionObj(t.ExactVersion) } -// def exclude_if_not_in_version!(version) func (t *Type) ExcludeIfNotInVersion(version *product.Version) { if !t.Exclude { if versionObj := t.exactVersionObj(); versionObj != nil { @@ -669,12 +594,6 @@ func (t *Type) ExcludeIfNotInVersion(version *product.Version) { } } -// Overriding is_a? to enable class overrides. -// Ruby does not let you natively change types, so this is the next best -// thing. - -// TODO Q1: check the type of superclasses of property t -// func (t *Type) is_a?(clazz) { func (t Type) IsA(clazz string) bool { if clazz == "" { log.Fatalf("class cannot be empty") @@ -685,20 +604,9 @@ func (t Type) IsA(clazz string) bool { } return t.Type == clazz - // super(clazz) } -// // Overriding class to enable class overrides. -// // Ruby does not let you natively change types, so this is the next best -// // thing. -// func (t *Type) class() { -// // return Module.const_get(@new_type) if @new_type - -// // super -// } - // Returns nested properties for this property. -// def nested_properties func (t Type) NestedProperties() []*Type { props := make([]*Type, 0) @@ -720,12 +628,10 @@ func (t Type) NestedProperties() []*Type { return props } -// def removed? func (t Type) Removed() bool { return t.RemovedMessage != "" } -// def deprecated? 
func (t Type) Deprecated() bool { return t.DeprecationMessage != "" } @@ -734,68 +640,10 @@ func (t *Type) GetDescription() string { return strings.TrimSpace(strings.TrimRight(t.Description, "\n")) } -// // private - -// // A constant value to be provided as field -// type Constant struct { -// // < Type -// value - -// func (t *Type) validate -// @description = "This is always //{value}." -// super -// end -// } - -// // Represents a primitive (non-composite) type. -// class Primitive < Type -// end - -// // Represents a boolean -// class Boolean < Primitive -// end - -// // Represents an integer -// class Integer < Primitive -// end - -// // Represents a double -// class Double < Primitive -// end - -// // Represents a string -// class String < Primitive -// func (t *Type) initialize(name = nil) -// super() - -// @name = name -// end - -// PROJECT = Api::Type::String.new('project') -// NAME = Api::Type::String.new('name') -// end - -// // Properties that are fetched externally -// class FetchedExternal < Type - -// func (t *Type) validate -// @conflicts ||= [] -// @at_least_one_of ||= [] -// @exactly_one_of ||= [] -// @required_with ||= [] -// end - -// func (t *Type) api_name -// name -// end -// end - -// class Path < Primitive -// end - -// // Represents a fingerprint. A fingerprint is an output-only -// // field used for optimistic locking during updates. -// // They are fetched from the GCP response. +// TODO rewrite: validation +// Represents a fingerprint. A fingerprint is an output-only +// field used for optimistic locking during updates. +// They are fetched from the GCP response. 
// class Fingerprint < FetchedExternal // func (t *Type) validate // super @@ -803,36 +651,8 @@ func (t *Type) GetDescription() string { // end // end -// // Represents a timestamp -// class Time < Primitive -// end - -// // A base class to tag objects that are composed by other objects (arrays, -// // nested objects, etc) -// class Composite < Type -// end - -// // Forwarding declaration to allow defining Array::NESTED_ARRAY_TYPE -// class NestedObject < Composite -// end - -// // Forwarding declaration to allow defining Array::RREF_ARRAY_TYPE -// class ResourceRef < Type -// end - -// // Represents an array, and stores its items' type +// TODO rewrite: validation // class Array < Composite -// item_type -// min_size -// max_size - -// func (t *Type) validate -// super -// if @item_type.is_a?(NestedObject) || @item_type.is_a?(ResourceRef) -// @item_type.set_variable(@name, :__name) -// @item_type.set_variable(@__resource, :__resource) -// @item_type.set_variable(self, :__parent) -// end // check :item_type, type: [::String, NestedObject, ResourceRef, Enum], required: true // unless @item_type.is_a?(NestedObject) || @item_type.is_a?(ResourceRef) \ @@ -840,25 +660,7 @@ func (t *Type) GetDescription() string { // raise "Invalid type //{@item_type}" // end -// check :min_size, type: ::Integer -// check :max_size, type: ::Integer -// end - -// func (t *Type) exclude_if_not_in_version!(version) -// super -// @item_type.exclude_if_not_in_version!(version) \ -// if @item_type.is_a? 
NestedObject -// end - -// func (t *Type) nested_properties -// return @item_type.nested_properties.reject(&:exclude) \ -// if @item_type.is_a?(Api::Type::NestedObject) - -// super -// end - // This function is for array field -// def item_type_class func (t Type) ItemTypeClass() string { if !t.IsA("Array") { return "" @@ -906,6 +708,7 @@ func (t Type) TFType(s string) string { return "schema.TypeString" } +// TODO rewrite: validation // // Represents an enum, and store is valid values // class Enum < Primitive // values @@ -917,40 +720,6 @@ func (t Type) TFType(s string) string { // check :skip_docs_values, type: :boolean // end -// func (t *Type) merge(other) -// result = self.class.new -// instance_variables.each do |v| -// result.instance_variable_set(v, instance_variable_get(v)) -// end - -// other.instance_variables.each do |v| -// if other.instance_variable_get(v).instance_of?(Array) -// result.instance_variable_set(v, deep_merge(result.instance_variable_get(v), -// other.instance_variable_get(v))) -// else -// result.instance_variable_set(v, other.instance_variable_get(v)) -// end -// end - -// result -// end -// end - -// // Represents a 'selfLink' property, which returns the URI of the resource. -// class SelfLink < FetchedExternal -// EXPORT_KEY = 'selfLink'.freeze - -// resource - -// func (t *Type) name -// EXPORT_KEY -// end - -// func (t *Type) out_name -// EXPORT_KEY.underscore -// end -// end - // // Represents a reference to another resource // class ResourceRef < Type // // The fields which can be overridden in provider.yaml. 
@@ -977,7 +746,6 @@ func (t Type) TFType(s string) string { // check_resource_ref_property_exists // end -// func (t *Type) resource_ref func (t Type) ResourceRef() *Resource { if !t.IsA("ResourceRef") { return nil @@ -991,8 +759,7 @@ func (t Type) ResourceRef() *Resource { return resources[0] } -// private - +// TODO rewrite: validation // func (t *Type) check_resource_ref_property_exists // return unless defined?(resource_ref.all_user_properties) @@ -1023,12 +790,10 @@ func (t Type) ResourceRef() *Resource { // Returns all properties including the ones that are excluded // This is used for PropertyOverride validation -// def all_properties func (t Type) AllProperties() []*Type { return t.Properties } -// func (t *Type) properties func (t Type) UserProperties() []*Type { if t.IsA("NestedObject") { if t.Properties == nil { @@ -1044,8 +809,6 @@ func (t Type) UserProperties() []*Type { // Returns the list of top-level properties once any nested objects with // flatten_object set to true have been collapsed -// -// func (t *Type) root_properties func (t *Type) RootProperties() []*Type { props := make([]*Type, 0) for _, p := range t.UserProperties() { @@ -1058,23 +821,14 @@ func (t *Type) RootProperties() []*Type { return props } -// func (t *Type) exclude_if_not_in_version!(version) -// super -// @properties.each { |p| p.exclude_if_not_in_version!(version) } -// end -// end - // An array of string -> string key -> value pairs, such as labels. // While this is technically a map, it's split out because it's a much // simpler property to generate and means we can avoid conditional logic // in Map. 
- func NewProperty(name, apiName string, options []func(*Type)) *Type { p := &Type{ - NamedObject: NamedObject{ - Name: name, - ApiName: apiName, - }, + Name: name, + ApiName: apiName, } for _, option := range options { @@ -1137,80 +891,51 @@ func propertyWithIgnoreWrite(ignoreWrite bool) func(*Type) { } } -// class KeyValuePairs < Composite -// // Ignore writing the "effective_labels" and "effective_annotations" fields to API. -// ignore_write - -// func (t *Type) initialize(name: nil, output: nil, api_name: nil, description: nil, min_version: nil, -// ignore_write: nil, update_verb: nil, update_url: nil, immutable: nil) -// super() - -// @name = name -// @output = output -// @api_name = api_name -// @description = description -// @min_version = min_version -// @ignore_write = ignore_write -// @update_verb = update_verb -// @update_url = update_url -// @immutable = immutable -// end - -// func (t *Type) validate -// super -// check :ignore_write, type: :boolean, default: false - -// return if @__resource.__product.nil? - -// product_name = @__resource.__product.name -// resource_name = @__resource.name +func (t *Type) validateLabelsField() { + productName := t.ResourceMetadata.ProductMetadata.Name + resourceName := t.ResourceMetadata.Name + lineage := t.Lineage() + if lineage == "labels" || lineage == "metadata.labels" || lineage == "configuration.labels" { + if !t.IsA("KeyValueLabels") && + // The label value must be empty string, so skip this resource + !(productName == "CloudIdentity" && resourceName == "Group") && -// if lineage == 'labels' || lineage == 'metadata.labels' || -// lineage == 'configuration.labels' -// if !(is_a? 
Api::Type::KeyValueLabels) && -// // The label value must be empty string, so skip this resource -// !(product_name == 'CloudIdentity' && resource_name == 'Group') && + // The "labels" field has type Array, so skip this resource + !(productName == "DeploymentManager" && resourceName == "Deployment") && -// // The "labels" field has type Array, so skip this resource -// !(product_name == 'DeploymentManager' && resource_name == 'Deployment') && + // https://github.com/hashicorp/terraform-provider-google/issues/16219 + !(productName == "Edgenetwork" && resourceName == "Network") && -// // https://github.com/hashicorp/terraform-provider-google/issues/16219 -// !(product_name == 'Edgenetwork' && resource_name == 'Network') && + // https://github.com/hashicorp/terraform-provider-google/issues/16219 + !(productName == "Edgenetwork" && resourceName == "Subnet") && -// // https://github.com/hashicorp/terraform-provider-google/issues/16219 -// !(product_name == 'Edgenetwork' && resource_name == 'Subnet') && + // "userLabels" is the resource labels field + !(productName == "Monitoring" && resourceName == "NotificationChannel") && -// // "userLabels" is the resource labels field -// !(product_name == 'Monitoring' && resource_name == 'NotificationChannel') && - -// // The "labels" field has type Array, so skip this resource -// !(product_name == 'Monitoring' && resource_name == 'MetricDescriptor') -// raise "Please use type KeyValueLabels for field //{lineage} " \ -// "in resource //{product_name}///{resource_name}" -// end -// elsif is_a? 
Api::Type::KeyValueLabels -// raise "Please don't use type KeyValueLabels for field //{lineage} " \ -// "in resource //{product_name}///{resource_name}" -// end + // The "labels" field has type Array, so skip this resource + !(productName == "Monitoring" && resourceName == "MetricDescriptor") { + log.Fatalf("Please use type KeyValueLabels for field %s in resource %s/%s", lineage, productName, resourceName) + } + } else if t.IsA("KeyValueLabels") { + log.Fatalf("Please don't use type KeyValueLabels for field %s in resource %s/%s", lineage, productName, resourceName) + } -// if lineage == 'annotations' || lineage == 'metadata.annotations' -// if !(is_a? Api::Type::KeyValueAnnotations) && -// // The "annotations" field has "ouput: true", so skip this eap resource -// !(product_name == 'Gkeonprem' && resource_name == 'BareMetalAdminClusterEnrollment') -// raise "Please use type KeyValueAnnotations for field //{lineage} " \ -// "in resource //{product_name}///{resource_name}" -// end -// elsif is_a? Api::Type::KeyValueAnnotations -// raise "Please don't use type KeyValueAnnotations for field //{lineage} " \ -// "in resource //{product_name}///{resource_name}" -// end -// end + if lineage == "annotations" || lineage == "metadata.annotations" { + if !t.IsA("KeyValueAnnotations") && + // The "annotations" field has "ouput: true", so skip this eap resource + !(productName == "Gkeonprem" && resourceName == "BareMetalAdminClusterEnrollment") { + log.Fatalf("Please use type KeyValueAnnotations for field %s in resource %s/%s", lineage, productName, resourceName) + } + } else if t.IsA("KeyValueAnnotations") { + log.Fatalf("Please don't use type KeyValueAnnotations for field %s in resource %s/%s", lineage, productName, resourceName) + } +} -// def field_min_version func (t Type) fieldMinVersion() string { return t.MinVersion } +// TODO rewrite: validation // // An array of string -> string key -> value pairs used specifically for the "labels" field. 
// // The field name with this type should be "labels" literally. // class KeyValueLabels < KeyValuePairs @@ -1243,65 +968,17 @@ func (t Type) fieldMinVersion() string { // end // end +// TODO rewrite: validation // // Map from string keys -> nested object entries // class Map < Composite -// // .yaml. -// module Fields -// // The type definition of the contents of the map. -// value_type - -// // While the API doesn't give keys an explicit name, we specify one -// // because in Terraform the key has to be a property of the object. -// // -// // The name of the key. Used in the Terraform schema as a field name. -// key_name - -// // A description of the key's format. Used in Terraform to describe -// // the field in documentation. -// key_description -// end -// include Fields - // func (t *Type) validate // super // check :key_name, type: ::String, required: true // check :key_description, type: ::String - -// @value_type.set_variable(@name, :__name) -// @value_type.set_variable(@__resource, :__resource) -// @value_type.set_variable(self, :__parent) // check :value_type, type: Api::Type::NestedObject, required: true // raise "Invalid type //{@value_type}" unless type?(@value_type) // end -// func (t *Type) nested_properties -// @value_type.nested_properties.reject(&:exclude) -// end -// end - -// // Support for schema ValidateFunc functionality. -// class Validation < Object -// // Ensures the value matches this regex -// regex -// function - -// func (t *Type) validate -// super - -// check :regex, type: String -// check :function, type: String -// end -// end - -// func (t *Type) type?(type) -// type.is_a?(Type) || !get_type(type).nil? 
-// end - -// func (t *Type) get_type(type) -// Module.const_get(type) -// end - -// def property_ns_prefix func (t Type) PropertyNsPrefix() []string { return []string{ "Google", @@ -1312,7 +989,6 @@ func (t Type) PropertyNsPrefix() []string { // "Namespace" - prefix with product and resource - a property with // information from the "object" variable - func (t Type) NamespaceProperty() string { name := google.Camelize(t.Name, "upper") p := t @@ -1324,18 +1000,6 @@ func (t Type) NamespaceProperty() string { return fmt.Sprintf("%s%s%s", google.Camelize(t.ResourceMetadata.ProductMetadata.ApiName, "lower"), t.ResourceMetadata.Name, name) } -// def namespace_property_from_object(property, object) -// -// name = property.name.camelize -// until property.parent.nil? -// property = property.parent -// name = property.name.camelize + name -// end -// -// "#{property.__resource.__product.api_name.camelize(:lower)}#{object.name}#{name}" -// -// end - func (t Type) CustomTemplate(templatePath string, appendNewline bool) string { return resource.ExecuteTemplate(&t, templatePath, appendNewline) } @@ -1368,7 +1032,6 @@ func (t *Type) GoLiteral(value interface{}) string { } } -// def force_new?(property, resource) func (t *Type) IsForceNew() bool { if t.IsA("KeyValueLabels") && t.ResourceMetadata.RootLabels() { return false @@ -1403,7 +1066,6 @@ func (t *Type) IsForceNew() bool { // TODO(emilymye): Change format of input for // exactly_one_of/at_least_one_of/etc to use camelcase, MM properities and // convert to snake in this method -// def get_property_schema_path(schema_path, resource) func (t *Type) GetPropertySchemaPath(schemaPath string) string { nestedProps := t.ResourceMetadata.UserProperites() diff --git a/mmv1/api/type_test.go b/mmv1/api/type_test.go index 86c3e2e57b1e..9d592d35a765 100644 --- a/mmv1/api/type_test.go +++ b/mmv1/api/type_test.go @@ -11,9 +11,7 @@ func TestTypeMinVersionObj(t *testing.T) { t.Parallel() p := Product{ - NamedObject: NamedObject{ - Name: 
"test", - }, + Name: "test", Versions: []*product.Version{ &product.Version{ Name: "beta", @@ -38,14 +36,10 @@ func TestTypeMinVersionObj(t *testing.T) { { description: "type minVersion is empty and resource minVersion is empty", obj: Type{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ResourceMetadata: &Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ProductMetadata: &p, }, @@ -55,14 +49,10 @@ func TestTypeMinVersionObj(t *testing.T) { { description: "type minVersion is empty and resource minVersion is beta", obj: Type{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ResourceMetadata: &Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "beta", ProductMetadata: &p, }, @@ -72,14 +62,10 @@ func TestTypeMinVersionObj(t *testing.T) { { description: "type minVersion is not empty", obj: Type{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "beta", ResourceMetadata: &Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ProductMetadata: &p, }, @@ -107,9 +93,7 @@ func TestTypeExcludeIfNotInVersion(t *testing.T) { t.Parallel() p := Product{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", Versions: []*product.Version{ &product.Version{ Name: "beta", @@ -135,15 +119,11 @@ func TestTypeExcludeIfNotInVersion(t *testing.T) { { description: "type has Exclude true", obj: Type{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", Exclude: true, MinVersion: "", ResourceMetadata: &Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ProductMetadata: &p, }, @@ -156,16 +136,12 @@ func TestTypeExcludeIfNotInVersion(t *testing.T) { { description: "type has Exclude false and not empty ExactVersion", obj: Type{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", Exclude: false, 
ExactVersion: "beta", ResourceMetadata: &Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "beta", ProductMetadata: &p, }, @@ -178,16 +154,12 @@ func TestTypeExcludeIfNotInVersion(t *testing.T) { { description: "type has Exclude false and empty ExactVersion", obj: Type{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "beta", Exclude: false, ExactVersion: "", ResourceMetadata: &Resource{ - NamedObject: NamedObject{ - Name: "test", - }, + Name: "test", MinVersion: "", ProductMetadata: &p, }, diff --git a/mmv1/google/string_utils.go b/mmv1/google/string_utils.go index 63d8fce9c683..338f9b9f4d8a 100644 --- a/mmv1/google/string_utils.go +++ b/mmv1/google/string_utils.go @@ -49,8 +49,7 @@ func SpaceSeparated(source string) string { return tmp } -// // Converts a string to space-separated capitalized words -// def self.title(source) +// Converts a string to space-separated capitalized words func SpaceSeparatedTitle(source string) string { ss := SpaceSeparated(source) return strings.Title(ss) @@ -58,8 +57,6 @@ func SpaceSeparatedTitle(source string) string { // Returns all the characters up until the period (.) or returns text // unchanged if there is no period. -// -// def self.first_sentence(text) func FirstSentence(text string) string { re := regexp.MustCompile(`[.?!]`) periodPos := re.FindStringIndex(text) @@ -117,11 +114,6 @@ func Camelize(term string, firstLetter string) string { return strings.Title(match) }) } else { - // TODO: rewrite with the regular expression. 
Lookahead(?=) is not supported in Go - // acronymsCamelizeRegex := regexp.MustCompile(`^(?:(?=a)b(?=\b|[A-Z_])|\w)`) - // res = acronymsCamelizeRegex.ReplaceAllStringFunc(res, func(match string) string { - // return strings.ToLower(match) - // }) if len(res) != 0 { r := []rune(res) r[0] = unicode.ToLower(r[0]) @@ -155,7 +147,7 @@ aren't common in JS-based regex flavours, but are in Perl-based ones func Format2Regex(format string) string { re := regexp.MustCompile(`\{\{%([[:word:]]+)\}\}`) result := re.ReplaceAllStringFunc(format, func(match string) string { - // TODO: the trims may not be needed with more effecient regex + // TODO rewrite: the trims may not be needed with more effecient regex word := strings.TrimPrefix(match, "{{") word = strings.TrimSuffix(word, "}}") word = strings.ReplaceAll(word, "%", "") diff --git a/mmv1/google/yaml_validator.go b/mmv1/google/yaml_validator.go index 811246cea98e..31a7e86e2a3f 100644 --- a/mmv1/google/yaml_validator.go +++ b/mmv1/google/yaml_validator.go @@ -23,133 +23,7 @@ import ( type YamlValidator struct{} func (v *YamlValidator) Parse(content []byte, obj interface{}, yamlPath string) { - // TODO(nelsonjr): Allow specifying which symbols to restrict it further. - // But it requires inspecting all configuration files for symbol sources, - // such as Enum values. Leaving it as a nice-to-have for the future. 
if err := yaml.UnmarshalStrict(content, obj); err != nil { log.Fatalf("Cannot unmarshal data from file %s: %v", yamlPath, err) } } - -// func (v *YamlValidator) allowed_classes() { -// ObjectSpace.each_object(Class).select do |klass| -// klass < Google::YamlValidator -// end.push(Time, Symbol) -// } - -// func (v *YamlValidator) validate() { -// Google::LOGGER.debug "Validating //{self.class} '//{@name}'" -// check_extraneous_properties -// } - -// func (v *YamlValidator) set_variable(value, property) { -// Google::LOGGER.debug "Setting variable of //{value} to //{self}" -// instance_variable_set("@//{property}", value) -// } - -// Does all validation checking for a particular variable. -// options: -// :default - the default value for this variable if its nil -// :type - the allowed types (single or array) that this value can be -// :item_type - the allowed types that all values in this array should be -// (implied that type == array) -// :allowed - the allowed values that this non-array variable should be. -// :required - is the variable required? (defaults: false) -// func (v *YamlValidator) check(variable, **opts) { -// value = instance_variable_get("@//{variable}") - -// // Set default value. -// if !opts[:default].nil? && value.nil? -// instance_variable_set("@//{variable}", opts[:default]) -// value = instance_variable_get("@//{variable}") -// end - -// // Check if value is required. Print nested path if available. -// lineage_path = respond_to?('lineage') ? lineage : '' -// raise "//{lineage_path} > Missing '//{variable}'" if value.nil? && opts[:required] -// return if value.nil? 
- -// // Check type -// check_property_value(variable, value, opts[:type]) if opts[:type] - -// // Check item_type -// if value.is_a?(Array) -// raise "//{lineage_path} > //{variable} must have item_type on arrays" unless opts[:item_type] - -// value.each_with_index do |o, index| -// check_property_value("//{variable}[//{index}]", o, opts[:item_type]) -// end -// end - -// // Check if value is allowed -// return unless opts[:allowed] -// raise "//{value} on //{variable} should be one of //{opts[:allowed]}" \ -// unless opts[:allowed].include?(value) -// } - -// func (v *YamlValidator) conflicts(list) { -// value_checked = false -// list.each do |item| -// next if instance_variable_get("@//{item}").nil? -// raise "//{list.join(',')} cannot be set at the same time" if value_checked - -// value_checked = true -// end -// } - -// private - -// func (v *YamlValidator) check_type(name, object, type) { -// if type == :boolean -// return unless [TrueClass, FalseClass].find_index(object.class).nil? -// elsif type.is_a? ::Array -// return if type.find_index(:boolean) && [TrueClass, FalseClass].find_index(object.class) -// return unless type.find_index(object.class).nil? -// // check if class is or inherits from type -// elsif object.class <= type -// return -// end -// raise "Property '//{name}' is '//{object.class}' instead of '//{type}'" -// } - -// func (v *YamlValidator) log_check_type(object) { -// if object.respond_to?(:name) -// Google::LOGGER.debug "Checking object //{object.name}" -// else -// Google::LOGGER.debug "Checking object //{object}" -// end -// } - -// func (v *YamlValidator) check_property_value(property, prop_value, type) { -// Google::LOGGER.debug "Checking '//{property}' on //{object_display_name}" -// check_type property, prop_value, type unless type.nil? 
-// prop_value.validate if prop_value.is_a?(Api::Object) -// } - -// func (v *YamlValidator) check_extraneous_properties() { -// instance_variables.each do |variable| -// var_name = variable.id2name[1..] -// next if var_name.start_with?('__') - -// Google::LOGGER.debug "Validating '//{var_name}' on //{object_display_name}" -// raise "Extraneous variable '//{var_name}' in //{object_display_name}" \ -// unless methods.include?(var_name.intern) -// end -// } - -// func (v *YamlValidator) set_variables(objects, property) { -// return if objects.nil? - -// objects.each do |object| -// object.set_variable(self, property) if object.respond_to?(:set_variable) -// end -// } - -// func (v *YamlValidator) ensure_property_does_not_exist(property) { -// raise "Conflict of property '//{property}' for object '//{self}'" \ -// unless instance_variable_get("@//{property}").nil? -// } - -// func (v *YamlValidator) object_display_name() { -// "//{@name}" -// } diff --git a/mmv1/main.go b/mmv1/main.go index fbedea96bd5d..f23b54cf683f 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -20,7 +20,7 @@ import ( var wg sync.WaitGroup -// TODO Q2: additional flags +// TODO rewrite: additional flags // Example usage: --output $GOPATH/src/github.com/terraform-providers/terraform-provider-google-beta var outputPath = flag.String("output", "", "path to output generated files to") @@ -86,7 +86,7 @@ func main() { dir := filepath.Dir(filePath) allProductFiles = append(allProductFiles, fmt.Sprintf("products/%s", filepath.Base(dir))) } - // TODO Q2: override directory + // TODO rewrite: override directory if allProducts { productsToGenerate = allProductFiles @@ -140,7 +140,7 @@ func main() { if generateCode { providerToGenerate.CompileCommonFiles(*outputPath, productsForVersion, "") - // TODO Q2: product overrides + // TODO rewrite: product overrides } } @@ -151,13 +151,13 @@ func GenerateProduct(productChannel chan string, providerToGenerate *provider.Te productYamlPath := path.Join(productName, 
"go_product.yaml") - // TODO Q2: uncomment the error check that if the product.yaml exists for each product + // TODO rewrite: uncomment the error check that if the product.yaml exists for each product // after Go-converted product.yaml files are complete for all products // if _, err := os.Stat(productYamlPath); errors.Is(err, os.ErrNotExist) { // log.Fatalf("%s does not contain a product.yaml file", productName) // } - // TODO Q2: product overrides + // TODO rewrite: product overrides if _, err := os.Stat(productYamlPath); err == nil { var resources []*api.Resource = make([]*api.Resource, 0) @@ -194,7 +194,7 @@ func GenerateProduct(productChannel chan string, providerToGenerate *provider.Te resources = append(resources, resource) } - // TODO Q2: override resources + // TODO rewrite: override resources // Sort resources by name sort.Slice(resources, func(i, j int) bool { @@ -204,7 +204,7 @@ func GenerateProduct(productChannel chan string, providerToGenerate *provider.Te productApi.Objects = resources productApi.Validate() - // TODO Q2: set other providers via flag + // TODO rewrite: set other providers via flag providerToGenerate = provider.NewTerraform(productApi, *version, startTime) *productsForVersion = append(*productsForVersion, productApi) diff --git a/mmv1/products/cloudquotas/QuotaPreference.yaml b/mmv1/products/cloudquotas/QuotaPreference.yaml index b31af9878597..caba3b5b9d59 100644 --- a/mmv1/products/cloudquotas/QuotaPreference.yaml +++ b/mmv1/products/cloudquotas/QuotaPreference.yaml @@ -62,7 +62,6 @@ properties: - !ruby/object:Api::Type::String name: 'name' default_from_api: true - pattern: '{{parent}}/locations/global/quotaPreferences/{{name}}' description: | The resource name of the quota preference. Required except in the CREATE requests. 
diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' diff --git a/mmv1/products/cloudquotas/go_QuotaPreference.yaml b/mmv1/products/cloudquotas/go_QuotaPreference.yaml index 2a48249d5ca1..0e368b1b0eac 100644 --- a/mmv1/products/cloudquotas/go_QuotaPreference.yaml +++ b/mmv1/products/cloudquotas/go_QuotaPreference.yaml @@ -66,7 +66,6 @@ properties: type: String description: | The resource name of the quota preference. Required except in the CREATE requests. - pattern: '{{parent}}/locations/global/quotaPreferences/{{name}}' default_from_api: true diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 8824e2a9ae94..10e04969058d 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -31,15 +31,13 @@ import ( ) type TemplateData struct { - // include Compile::Core - OutputFolder string VersionName string TerraformResourceDirectory string TerraformProviderModule string - // TODO Q2: is this needed? + // TODO rewrite: is this needed? // # Information about the local environment // # (which formatters are enabled, start-time) // attr_accessor :env @@ -209,88 +207,6 @@ func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, g } } -// # path is the output name of the file -// # template is used to determine metadata about the file based on how it is -// # generated -// def format_output_file(path, template) -// return unless path.end_with?('.go') && @env[:goformat_enabled] - -// run_formatter("gofmt -w -s #{path}") -// run_formatter("goimports -w #{path}") unless template.include?('third_party/terraform') -// end - -// def run_formatter(command) -// output = %x(#{command} 2>&1) -// Google::LOGGER.error output unless $CHILD_STATUS.to_i.zero? 
-// end - -// def relative_path(target, base) -// Pathname.new(target).relative_path_from(Pathname.new(base)) -// end -// end - -// # Responsible for compiling provider-level files, rather than product-specific ones -// class ProviderFileTemplate < Provider::FileTemplate -// # All the products that are being compiled with the provider on this run -// attr_accessor :products - -// # Optional path to the directory where overrides reside. Used to locate files -// # outside of the MM root directory -// attr_accessor :override_path - -// def initialize(output_folder, version, env, products, override_path = nil) -// super() - -// @output_folder = output_folder -// @version = version -// @env = env -// @products = products -// @override_path = override_path -// end -// end - -// # Responsible for generating a file in the context of a product -// # with a given set of parameters. -// class ProductFileTemplate < Provider::FileTemplate -// # The name of the resource -// attr_accessor :name -// # The resource itself. -// attr_accessor :object -// # The entire API object. 
-// attr_accessor :product - -// class << self -// # Construct a new ProductFileTemplate based on a resource object -// def file_for_resource(output_folder, object, version, env) -// file_template = new(output_folder, object.name, object.__product, version, env) -// file_template.object = object -// file_template -// end -// end - -// def initialize(output_folder, name, product, version, env) -// super() - -// @name = name -// @product = product -// @output_folder = output_folder -// @version = version -// @env = env -// end -// end -// end - -// def import_path -// case @target_version_name -// when 'ga' -// "#{TERRAFORM_PROVIDER_GA}/#{RESOURCE_DIRECTORY_GA}" -// when 'beta' -// "#{TERRAFORM_PROVIDER_BETA}/#{RESOURCE_DIRECTORY_BETA}" -// else -// "#{TERRAFORM_PROVIDER_PRIVATE}/#{RESOURCE_DIRECTORY_PRIVATE}" -// end -// end - func (td *TemplateData) ImportPath() string { if td.VersionName == GA_VERSION { return "github.com/hashicorp/terraform-provider-google/google" diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 427f151f2bef..a204d7575ad5 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -197,7 +197,6 @@ func (t *Terraform) GenerateOperation(outputFolder string) { // Generate the IAM policy for this object. 
This is used to query and test // IAM policies separately from the resource itself -// def generate_iam_policy(pwd, data, generate_code, generate_docs) func (t *Terraform) GenerateIamPolicy(object api.Resource, templateData TemplateData, outputFolder string, generateCode, generateDocs bool) { if generateCode && object.IamPolicy != nil && (object.IamPolicy.MinVersion == "" || slices.Index(product.ORDER, object.IamPolicy.MinVersion) <= slices.Index(product.ORDER, t.TargetVersionName)) { productName := t.Product.ApiName @@ -222,7 +221,6 @@ func (t *Terraform) GenerateIamPolicy(object api.Resource, templateData Template } } -// def generate_iam_documentation(pwd, data) func (t *Terraform) GenerateIamDocumentation(object api.Resource, templateData TemplateData, outputFolder string, generateCode, generateDocs bool) { resourceDocFolder := path.Join(outputFolder, "website", "docs", "r") if err := os.MkdirAll(resourceDocFolder, os.ModePerm); err != nil { @@ -269,7 +267,6 @@ func (t *Terraform) FullResourceName(object api.Resource) string { return fmt.Sprintf("%s_%s", productName, name) } -// def copy_common_files(output_folder, generate_code, generate_docs, provider_name = nil) func (t Terraform) CopyCommonFiles(outputFolder string, generateCode, generateDocs bool) { log.Printf("Copying common files for %s", t.providerName()) @@ -346,7 +343,6 @@ func (t Terraform) getCopyFilesInFolder(folderPath, targetDir string) map[string return m } -// def copy_file_list(output_folder, files) func (t Terraform) CopyFileList(outputFolder string, files map[string]string) { for target, source := range files { targetFile := filepath.Join(outputFolder, target) @@ -383,13 +379,6 @@ func (t Terraform) CopyFileList(outputFolder string, files map[string]string) { } // Compiles files that are shared at the provider level -// -// def compile_common_files( -// output_folder, -// products, -// common_compile_file, -// override_path = nil -// ) func (t Terraform) CompileCommonFiles(outputFolder 
string, products []*api.Product, overridePath string) { t.generateResourcesForVersion(products) files := t.getCommonCompileFiles(t.TargetVersionName) @@ -453,7 +442,6 @@ func (t Terraform) getCompileFilesInFolder(folderPath, targetDir string) map[str return m } -// def compile_file_list(output_folder, files, file_template, pwd = Dir.pwd) func (t Terraform) CompileFileList(outputFolder string, files map[string]string, fileTemplate TemplateData, products []*api.Product) { providerWithProducts := ProviderWithProducts{ Terraform: t, @@ -483,7 +471,6 @@ func (t Terraform) CompileFileList(outputFolder string, files map[string]string, } } -// def add_hashicorp_copyright_header(output_folder, target) func (t Terraform) addHashicorpCopyRightHeader(outputFolder, target string) { if !expectedOutputFolder(outputFolder) { log.Printf("Unexpected output folder (%s) detected"+ @@ -550,7 +537,6 @@ func (t Terraform) addHashicorpCopyRightHeader(outputFolder, target string) { } } -// def expected_output_folder?(output_folder) func expectedOutputFolder(outputFolder string) bool { expectedFolders := []string{"terraform-provider-google", "terraform-provider-google-beta", "terraform-next", "terraform-google-conversion", "tfplan2cai"} folderName := filepath.Base(outputFolder) // Possible issue with Windows OS @@ -565,7 +551,6 @@ func expectedOutputFolder(outputFolder string) bool { return isExpected } -// def replace_import_path(output_folder, target) func (t Terraform) replaceImportPath(outputFolder, target string) { targetFile := filepath.Join(outputFolder, target) sourceByte, err := os.ReadFile(targetFile) @@ -649,7 +634,6 @@ func (t Terraform) ProviderFromVersion() string { // Gets the list of services dependent on the version ga, beta, and private // If there are some resources of a servcie is in GA, // then this service is in GA. 
Otherwise, the service is in BETA -// def get_mmv1_services_in_version(products, version) func (t Terraform) GetMmv1ServicesInVersion(products []*api.Product) []string { var services []string for _, product := range products { @@ -675,198 +659,6 @@ func (t Terraform) GetMmv1ServicesInVersion(products []*api.Product) []string { return services } -// def generate_newyaml(pwd, data) -// -// # @api.api_name is the service folder name -// product_name = @api.api_name -// target_folder = File.join(folder_name(data.version), 'services', product_name) -// FileUtils.mkpath target_folder -// data.generate(pwd, -// '/templates/terraform/yaml_conversion.erb', -// "#{target_folder}/go_#{data.object.name}.yaml", -// self) -// return if File.exist?("#{target_folder}/go_product.yaml") -// -// data.generate(pwd, -// '/templates/terraform/product_yaml_conversion.erb', -// "#{target_folder}/go_product.yaml", -// self) -// -// end -// -// def build_env -// -// { -// goformat_enabled: @go_format_enabled, -// start_time: @start_time -// } -// -// end -// -// # used to determine and separate objects that have update methods -// # that target individual fields -// def field_specific_update_methods(properties) -// -// properties_by_custom_update(properties).length.positive? -// -// end -// -// # Filter the properties to keep only the ones requiring custom update -// # method and group them by update url & verb. -// def properties_by_custom_update(properties) -// -// update_props = properties.reject do |p| -// p.update_url.nil? || p.update_verb.nil? 
|| p.update_verb == :NOOP || -// p.is_a?(Api::Type::KeyValueTerraformLabels) || -// p.is_a?(Api::Type::KeyValueLabels) # effective_labels is used for update -// end -// -// update_props.group_by do |p| -// { -// update_url: p.update_url, -// update_verb: p.update_verb, -// update_id: p.update_id, -// fingerprint_name: p.fingerprint_name -// } -// end -// -// end -// -// # Filter the properties to keep only the ones don't have custom update -// # method and group them by update url & verb. -// def properties_without_custom_update(properties) -// -// properties.select do |p| -// p.update_url.nil? || p.update_verb.nil? || p.update_verb == :NOOP -// end -// -// end -// -// # Takes a update_url and returns the list of custom updatable properties -// # that can be updated at that URL. This allows flattened objects -// # to determine which parent property in the API should be updated with -// # the contents of the flattened object -// def custom_update_properties_by_key(properties, key) -// -// properties_by_custom_update(properties).select do |k, _| -// k[:update_url] == key[:update_url] && -// k[:update_id] == key[:update_id] && -// k[:fingerprint_name] == key[:fingerprint_name] -// end.first.last -// # .first is to grab the element from the select which returns a list -// # .last is because properties_by_custom_update returns a list of -// # [{update_url}, [properties,...]] and we only need the 2nd part -// -// end -// -// def update_url(resource, url_part) -// -// [resource.__product.base_url, update_uri(resource, url_part)].flatten.join -// -// end -// -// def generating_hashicorp_repo? -// -// # The default Provider is used to generate TPG and TPGB in HashiCorp-owned repos. -// # The compiler deviates from the default behaviour with a -f flag to produce -// # non-HashiCorp downstreams. 
-// true -// -// end -// -// # ProductFileTemplate with Terraform specific fields -// class TerraformProductFileTemplate < Provider::ProductFileTemplate -// -// # The async object used for making operations. -// # We assume that all resources share the same async properties. -// attr_accessor :async -// -// # When generating OiCS examples, we attach the example we're -// # generating to the data object. -// attr_accessor :example -// -// attr_accessor :resource_name -// -// end -// -// # Sorts properties in the order they should appear in the TF schema: -// # Required, Optional, Computed -// def order_properties(properties) -// -// properties.select(&:required).sort_by(&:name) + -// properties.reject(&:required).reject(&:output).sort_by(&:name) + -// properties.select(&:output).sort_by(&:name) -// -// end -// -// def tf_type(property) -// -// tf_types[property.class] -// -// end -// -// # Converts between the Magic Modules type of an object and its type in the -// # TF schema -// def tf_types -// -// { -// Api::Type::Boolean => 'schema.TypeBool', -// Api::Type::Double => 'schema.TypeFloat', -// Api::Type::Integer => 'schema.TypeInt', -// Api::Type::String => 'schema.TypeString', -// # Anonymous string property used in array of strings. 
-// 'Api::Type::String' => 'schema.TypeString', -// Api::Type::Time => 'schema.TypeString', -// Api::Type::Enum => 'schema.TypeString', -// Api::Type::ResourceRef => 'schema.TypeString', -// Api::Type::NestedObject => 'schema.TypeList', -// Api::Type::Array => 'schema.TypeList', -// Api::Type::KeyValuePairs => 'schema.TypeMap', -// Api::Type::KeyValueLabels => 'schema.TypeMap', -// Api::Type::KeyValueTerraformLabels => 'schema.TypeMap', -// Api::Type::KeyValueEffectiveLabels => 'schema.TypeMap', -// Api::Type::KeyValueAnnotations => 'schema.TypeMap', -// Api::Type::Map => 'schema.TypeSet', -// Api::Type::Fingerprint => 'schema.TypeString' -// } -// -// end -// -// def updatable?(resource, properties) -// -// !resource.immutable || !properties.reject { |p| p.update_url.nil? }.empty? -// -// end -// -// # Returns tuples of (fieldName, list of update masks) for -// # top-level updatable fields. Schema path refers to a given Terraform -// # field name (e.g. d.GetChange('fieldName)') -// def get_property_update_masks_groups(properties, mask_prefix: ”) -// -// mask_groups = [] -// properties.each do |prop| -// if prop.flatten_object -// mask_groups += get_property_update_masks_groups( -// prop.properties, mask_prefix: "#{prop.api_name}." -// ) -// elsif prop.update_mask_fields -// mask_groups << [prop.name.underscore, prop.update_mask_fields] -// else -// mask_groups << [prop.name.underscore, [mask_prefix + prop.api_name]] -// end -// end -// mask_groups -// -// end -// -// # Capitalize the first letter of a property name. -// # E.g. "creationTimestamp" becomes "CreationTimestamp". -// def titlelize_property(property) -// -// property.name.camelize(:upper) -// -// end -// // # Generates the list of resources, and gets the count of resources and iam resources // # dependent on the version ga, beta or private. 
// # The resource object has the format @@ -877,7 +669,6 @@ func (t Terraform) GetMmv1ServicesInVersion(products []*api.Product) []string { // # } // # The variable resources_for_version is used to generate resources in file // # mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb -// def generate_resources_for_version(products, version) func (t *Terraform) generateResourcesForVersion(products []*api.Product) { for _, productDefinition := range products { service := strings.ToLower(productDefinition.Name) @@ -910,20 +701,16 @@ func (t *Terraform) generateResourcesForVersion(products []*api.Product) { }) } } - - // @resources_for_version = @resources_for_version.compact } // # TODO(nelsonjr): Review all object interfaces and move to private methods // # that should not be exposed outside the object hierarchy. -// def provider_name func (t Terraform) providerName() string { return reflect.TypeOf(t).Name() } // # Adapted from the method used in templating // # See: mmv1/compile/core.rb -// def comment_block(text, lang) func commentBlock(text []string, lang string) string { var headers []string switch lang { @@ -953,7 +740,6 @@ func commentText(text []string, symbols string) []string { return header } -// def language_from_filename(filename) func languageFromFilename(filename string) string { switch extension := filepath.Ext(filename); extension { case ".go": @@ -967,13 +753,6 @@ func languageFromFilename(filename string) string { } } -// # Returns the id format of an object, or self_link_uri if none is explicitly defined -// # We prefer the long name of a resource as the id so that users can reference -// # resources in a standard way, and most APIs accept short name, long name or self_link -// def id_format(object) -// object.id_format || object.self_link_uri -// end - // Returns the extension for DCL packages for the given version. 
This is needed // as the DCL uses "alpha" for preview resources, while we use "private" func (t Terraform) DCLVersion() string { diff --git a/mmv1/templates/terraform/operation.go.tmpl b/mmv1/templates/terraform/operation.go.tmpl index a7a435a672f9..3a778b8f8e2d 100644 --- a/mmv1/templates/terraform/operation.go.tmpl +++ b/mmv1/templates/terraform/operation.go.tmpl @@ -1,4 +1,4 @@ -{{/* TODO: if hc_downstream */ -}} +{{/* TODO rewrite: if hc_downstream */ -}} // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 @@ -94,7 +94,7 @@ func create{{ $.ProductMetadata.Name }}Waiter(config *transport_tpg.Config, op m Might as well just nolint it so we can pass the linter checks. */}} -// nolint: deadcode,unused {{/* TODO: remove the comment */}} +// nolint: deadcode,unused {{/* TODO rewrite: remove the comment */}} func {{ camelize $.ProductMetadata.Name "upper" }}OperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{},{{- if $.IncludeProjectForOperation }} project,{{- end }} activity, userAgent string, timeout time.Duration) error { w, err := create{{ $.ProductMetadata.Name }}Waiter(config, op, {{- if $.IncludeProjectForOperation }} project, {{ end }} activity, userAgent) if err != nil { diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 28230e200075..bbbfc17802eb 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -420,9 +420,6 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{if and ($.GetAsync) ($.GetAsync.IsA "PollAsync")}} func resource{{ $.ResourceName -}}PollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { -{{if $.GetAsync.CustomPollRead -}} -//TODO custom poll read -{{ else -}} config := meta.(*transport_tpg.Config) @@ -504,7 +501,6 @@ func resource{{ $.ResourceName 
-}}PollRead(d *schema.ResourceData, meta interfac } {{ end }} return res, nil -{{ end -}} } } {{ end }} From 40b03fd9444330e853c295259fd06325b9aaf04a Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 6 Sep 2024 12:25:54 -0700 Subject: [PATCH 55/60] Scc rename to remove incorrect plural resources (#11657) --- ...ueryExports.yaml => FolderSccBigQueryExport.yaml} | 2 +- ...eryExports.yaml => ProjectSccBigQueryExport.yaml} | 2 +- ...yExports.yaml => go_FolderSccBigQueryExport.yaml} | 2 +- ...Exports.yaml => go_ProjectSccBigQueryExport.yaml} | 2 +- ...c_v2_folder_big_query_export_config_basic.tf.tmpl | 2 +- ..._v2_project_big_query_export_config_basic.tf.tmpl | 2 +- ...cc_v2_folder_big_query_export_config_basic.tf.erb | 2 +- ...c_v2_project_big_query_export_config_basic.tf.erb | 2 +- ...rce_scc_v2_folder_big_query_export_config_test.go | 12 ++++++------ ...ce_scc_v2_project_big_query_export_config_test.go | 8 ++++---- 10 files changed, 18 insertions(+), 18 deletions(-) rename mmv1/products/securitycenterv2/{FolderSccBigQueryExports.yaml => FolderSccBigQueryExport.yaml} (99%) rename mmv1/products/securitycenterv2/{ProjectSccBigQueryExports.yaml => ProjectSccBigQueryExport.yaml} (99%) rename mmv1/products/securitycenterv2/{go_FolderSccBigQueryExports.yaml => go_FolderSccBigQueryExport.yaml} (99%) rename mmv1/products/securitycenterv2/{go_ProjectSccBigQueryExports.yaml => go_ProjectSccBigQueryExport.yaml} (99%) diff --git a/mmv1/products/securitycenterv2/FolderSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/FolderSccBigQueryExport.yaml similarity index 99% rename from mmv1/products/securitycenterv2/FolderSccBigQueryExports.yaml rename to mmv1/products/securitycenterv2/FolderSccBigQueryExport.yaml index 98813ed865d0..8f1171ae3b8e 100644 --- a/mmv1/products/securitycenterv2/FolderSccBigQueryExports.yaml +++ b/mmv1/products/securitycenterv2/FolderSccBigQueryExport.yaml @@ -12,7 +12,7 @@ # limitations under the License. 
--- !ruby/object:Api::Resource -name: 'FolderSccBigQueryExports' +name: 'FolderSccBigQueryExport' base_url: folders/{{folder}}/locations/{{location}}/bigQueryExports self_link: folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} create_url: folders/{{folder}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}} diff --git a/mmv1/products/securitycenterv2/ProjectSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/ProjectSccBigQueryExport.yaml similarity index 99% rename from mmv1/products/securitycenterv2/ProjectSccBigQueryExports.yaml rename to mmv1/products/securitycenterv2/ProjectSccBigQueryExport.yaml index 52b2261aa31b..457cc33d09a0 100644 --- a/mmv1/products/securitycenterv2/ProjectSccBigQueryExports.yaml +++ b/mmv1/products/securitycenterv2/ProjectSccBigQueryExport.yaml @@ -12,7 +12,7 @@ # limitations under the License. --- !ruby/object:Api::Resource -name: 'ProjectSccBigQueryExports' +name: 'ProjectSccBigQueryExport' base_url: projects/{{project}}/locations/{{location}}/bigQueryExports self_link: projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} create_url: projects/{{project}}/locations/{{location}}/bigQueryExports?bigQueryExportId={{big_query_export_id}} diff --git a/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_FolderSccBigQueryExport.yaml similarity index 99% rename from mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml rename to mmv1/products/securitycenterv2/go_FolderSccBigQueryExport.yaml index 1c3263f4b062..db12c0518cc2 100644 --- a/mmv1/products/securitycenterv2/go_FolderSccBigQueryExports.yaml +++ b/mmv1/products/securitycenterv2/go_FolderSccBigQueryExport.yaml @@ -13,7 +13,7 @@ # Warning: This is a temporary file, and should not be edited directly --- -name: 'FolderSccBigQueryExports' +name: 'FolderSccBigQueryExport' description: | A Cloud Security Command Center (Cloud SCC) Big Query 
Export Config. It represents exporting Security Command Center data, including assets, findings, and security marks diff --git a/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml b/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExport.yaml similarity index 99% rename from mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml rename to mmv1/products/securitycenterv2/go_ProjectSccBigQueryExport.yaml index 73c06de1c11b..8f615c82ff70 100644 --- a/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExports.yaml +++ b/mmv1/products/securitycenterv2/go_ProjectSccBigQueryExport.yaml @@ -13,7 +13,7 @@ # Warning: This is a temporary file, and should not be edited directly --- -name: 'ProjectSccBigQueryExports' +name: 'ProjectSccBigQueryExport' description: | A Cloud Security Command Center (Cloud SCC) Big Query Export Config. It represents exporting Security Command Center data, including assets, findings, and security marks diff --git a/mmv1/templates/terraform/examples/go/scc_v2_folder_big_query_export_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_v2_folder_big_query_export_config_basic.tf.tmpl index 7e158156fc13..992235040377 100644 --- a/mmv1/templates/terraform/examples/go/scc_v2_folder_big_query_export_config_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/scc_v2_folder_big_query_export_config_basic.tf.tmpl @@ -22,7 +22,7 @@ resource "google_bigquery_dataset" "default" { } } -resource "google_scc_v2_folder_scc_big_query_exports" "{{$.PrimaryResourceId}}" { +resource "google_scc_v2_folder_scc_big_query_export" "{{$.PrimaryResourceId}}" { big_query_export_id = "{{index $.Vars "big_query_export_id"}}" folder = google_folder.folder.folder_id dataset = google_bigquery_dataset.default.id diff --git a/mmv1/templates/terraform/examples/go/scc_v2_project_big_query_export_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_v2_project_big_query_export_config_basic.tf.tmpl index 1ef718a8b206..e64171114b48 100644 
--- a/mmv1/templates/terraform/examples/go/scc_v2_project_big_query_export_config_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/scc_v2_project_big_query_export_config_basic.tf.tmpl @@ -15,7 +15,7 @@ resource "google_bigquery_dataset" "default" { } } -resource "google_scc_v2_project_scc_big_query_exports" "{{$.PrimaryResourceId}}" { +resource "google_scc_v2_project_scc_big_query_export" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" big_query_export_id = "{{index $.Vars "big_query_export_id"}}" project = "{{index $.TestEnvVars "project"}}" diff --git a/mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb b/mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb index 004659847645..471b63eff42c 100644 --- a/mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb +++ b/mmv1/templates/terraform/examples/scc_v2_folder_big_query_export_config_basic.tf.erb @@ -22,7 +22,7 @@ resource "google_bigquery_dataset" "default" { } } -resource "google_scc_v2_folder_scc_big_query_exports" "<%= ctx[:primary_resource_id] %>" { +resource "google_scc_v2_folder_scc_big_query_export" "<%= ctx[:primary_resource_id] %>" { big_query_export_id = "<%= ctx[:vars]['big_query_export_id'] %>" folder = google_folder.folder.folder_id dataset = google_bigquery_dataset.default.id diff --git a/mmv1/templates/terraform/examples/scc_v2_project_big_query_export_config_basic.tf.erb b/mmv1/templates/terraform/examples/scc_v2_project_big_query_export_config_basic.tf.erb index d88c63c15a4c..1f20867e722d 100644 --- a/mmv1/templates/terraform/examples/scc_v2_project_big_query_export_config_basic.tf.erb +++ b/mmv1/templates/terraform/examples/scc_v2_project_big_query_export_config_basic.tf.erb @@ -15,7 +15,7 @@ resource "google_bigquery_dataset" "default" { } } -resource "google_scc_v2_project_scc_big_query_exports" "<%= ctx[:primary_resource_id] %>" { +resource 
"google_scc_v2_project_scc_big_query_export" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['name'] %>" big_query_export_id = "<%= ctx[:vars]['big_query_export_id'] %>" project = "<%= ctx[:test_env_vars]['project'] %>" diff --git a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go index 70ec6bb4ee85..e2ce56c590be 100644 --- a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go +++ b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_folder_big_query_export_config_test.go @@ -35,7 +35,7 @@ func TestAccSecurityCenterV2FolderBigQueryExportConfig_basic(t *testing.T) { Config: testAccSecurityCenterV2FolderBigQueryExportConfig_basic(context), }, { - ResourceName: "google_scc_v2_folder_scc_big_query_exports.default", + ResourceName: "google_scc_v2_folder_scc_big_query_export.default", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"update_time"}, @@ -44,7 +44,7 @@ func TestAccSecurityCenterV2FolderBigQueryExportConfig_basic(t *testing.T) { Config: testAccSecurityCenterV2FolderBigQueryExportConfig_update(context), }, { - ResourceName: "google_scc_v2_folder_scc_big_query_exports.default", + ResourceName: "google_scc_v2_folder_scc_big_query_export.default", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"update_time"}, @@ -85,7 +85,7 @@ resource "time_sleep" "wait_1_minute" { create_duration = "3m" } -resource "google_scc_v2_folder_scc_big_query_exports" "default" { +resource "google_scc_v2_folder_scc_big_query_export" "default" { big_query_export_id = "%{big_query_export_id}" folder = google_folder.folder.folder_id dataset = google_bigquery_dataset.default.id @@ -103,7 +103,7 @@ resource "google_scc_v2_folder_scc_big_query_exports" "default" { resource 
"time_sleep" "wait_for_cleanup" { create_duration = "3m" - depends_on = [google_scc_v2_folder_scc_big_query_exports.default] + depends_on = [google_scc_v2_folder_scc_big_query_export.default] } `, context) } @@ -135,7 +135,7 @@ resource "google_bigquery_dataset" "default" { } } -resource "google_scc_v2_folder_scc_big_query_exports" "default" { +resource "google_scc_v2_folder_scc_big_query_export" "default" { big_query_export_id = "%{big_query_export_id}" folder = google_folder.folder.folder_id dataset = google_bigquery_dataset.default.id @@ -151,7 +151,7 @@ resource "google_scc_v2_folder_scc_big_query_exports" "default" { resource "time_sleep" "wait_for_cleanup" { create_duration = "3m" - depends_on = [google_scc_v2_folder_scc_big_query_exports.default] + depends_on = [google_scc_v2_folder_scc_big_query_export.default] } `, context) } diff --git a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_big_query_export_config_test.go b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_big_query_export_config_test.go index 7769b14b48c1..dc2ad5b7abab 100644 --- a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_big_query_export_config_test.go +++ b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_project_big_query_export_config_test.go @@ -38,7 +38,7 @@ func TestAccSecurityCenterV2ProjectBigQueryExportConfig_basic(t *testing.T) { Config: testAccSecurityCenterV2ProjectBigQueryExportConfig_basic(context), }, { - ResourceName: "google_scc_v2_project_scc_big_query_exports.default", + ResourceName: "google_scc_v2_project_scc_big_query_export.default", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"update_time", "project"}, @@ -47,7 +47,7 @@ func TestAccSecurityCenterV2ProjectBigQueryExportConfig_basic(t *testing.T) { Config: testAccSecurityCenterV2ProjectBigQueryExportConfig_update(context), }, { - ResourceName: 
"google_scc_v2_project_scc_big_query_exports.default", + ResourceName: "google_scc_v2_project_scc_big_query_export.default", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"update_time", "project"}, @@ -81,7 +81,7 @@ resource "time_sleep" "wait_1_minute" { create_duration = "3m" } -resource "google_scc_v2_project_scc_big_query_exports" "default" { +resource "google_scc_v2_project_scc_big_query_export" "default" { big_query_export_id = "%{big_query_export_id}" project = "%{project}" dataset = google_bigquery_dataset.default.id @@ -115,7 +115,7 @@ resource "google_bigquery_dataset" "default" { } } -resource "google_scc_v2_project_scc_big_query_exports" "default" { +resource "google_scc_v2_project_scc_big_query_export" "default" { big_query_export_id = "%{big_query_export_id}" project = "%{project}" dataset = google_bigquery_dataset.default.id From ad5d51eae76de376b04bfaf3cdbbd1b67441b6aa Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Fri, 6 Sep 2024 15:00:41 -0500 Subject: [PATCH 56/60] go rewrite healthcare (#11654) Co-authored-by: Nick Elliot --- mmv1/products/healthcare/go_ConsentStore.yaml | 113 ++++++ mmv1/products/healthcare/go_Dataset.yaml | 99 ++++++ mmv1/products/healthcare/go_DicomStore.yaml | 141 ++++++++ mmv1/products/healthcare/go_FhirStore.yaml | 335 ++++++++++++++++++ mmv1/products/healthcare/go_Hl7V2Store.yaml | 228 ++++++++++++ mmv1/products/healthcare/go_product.yaml | 24 ++ 6 files changed, 940 insertions(+) create mode 100644 mmv1/products/healthcare/go_ConsentStore.yaml create mode 100644 mmv1/products/healthcare/go_Dataset.yaml create mode 100644 mmv1/products/healthcare/go_DicomStore.yaml create mode 100644 mmv1/products/healthcare/go_FhirStore.yaml create mode 100644 mmv1/products/healthcare/go_Hl7V2Store.yaml create mode 100644 mmv1/products/healthcare/go_product.yaml diff --git a/mmv1/products/healthcare/go_ConsentStore.yaml b/mmv1/products/healthcare/go_ConsentStore.yaml new file mode 100644 index 
000000000000..c074ab71c852 --- /dev/null +++ b/mmv1/products/healthcare/go_ConsentStore.yaml @@ -0,0 +1,113 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ConsentStore' +kind: 'healthcare#consentStore' +description: | + The Consent Management API is a tool for tracking user consents and the documentation associated with the consents. +references: + guides: + 'Creating a Consent store': 'https://cloud.google.com/healthcare/docs/how-tos/consent' + api: 'https://cloud.google.com/healthcare/docs/reference/rest/v1/projects.locations.datasets.consentStores' +docs: +id_format: '{{dataset}}/consentStores/{{name}}' +base_url: '{{dataset}}/consentStores?consentStoreId={{name}}' +self_link: '{{dataset}}/consentStores/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{%dataset}}/consentStores/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'consent_store_id' + base_url: '{{%dataset}}/consentStores/{{name}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + self_link: '{{%dataset}}/consentStores/{{name}}' + import_format: + - '{{%dataset}}/consentStores/{{name}}' + - '{{name}}' +custom_code: +exclude_sweeper: true +examples: + - name: 'healthcare_consent_store_basic' + primary_resource_id: 'my-consent' + 
primary_resource_name: ' fmt.Sprintf("projects/%s/locations/%s/datasets/tf-test-my-dataset%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), context["random_suffix"]), fmt.Sprintf("tf-test-my-consent-store%s", context["random_suffix"])' + vars: + dataset_id: 'my-dataset' + consent_id: 'my-consent-store' + - name: 'healthcare_consent_store_full' + primary_resource_id: 'my-consent' + vars: + dataset_id: 'my-dataset' + consent_id: 'my-consent-store' + - name: 'healthcare_consent_store_iam' + primary_resource_id: 'my-consent' + vars: + dataset_id: 'my-dataset' + account_id: 'my-account' + consent_id: 'my-consent-store' +parameters: + - name: 'dataset' + type: ResourceRef + description: | + Identifies the dataset addressed by this request. Must be in the format + 'projects/{project}/locations/{location}/datasets/{dataset}' + url_param_only: true + required: true + immutable: true + resource: 'Dataset' + imports: 'selfLink' +properties: + - name: 'name' + type: String + description: | + The name of this ConsentStore, for example: + "consent1" + url_param_only: true + required: true + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'defaultConsentTtl' + type: String + description: | + Default time to live for consents in this store. Must be at least 24 hours. Updating this field will not affect the expiration time of existing consents. + + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". + required: false + - name: 'enableConsentCreateOnUpdate' + type: Boolean + description: | + If true, [consents.patch] [google.cloud.healthcare.v1.consent.UpdateConsent] creates the consent if it does not already exist. + required: false + - name: 'labels' + type: KeyValueLabels + description: | + User-supplied key-value pairs used to organize Consent stores. 
+ + Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must + conform to the following PCRE regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}` + + Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 + bytes, and must conform to the following PCRE regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}` + + No more than 64 labels can be associated with a given store. + + An object containing a list of "key": value pairs. + Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + required: false diff --git a/mmv1/products/healthcare/go_Dataset.yaml b/mmv1/products/healthcare/go_Dataset.yaml new file mode 100644 index 000000000000..974434d6f102 --- /dev/null +++ b/mmv1/products/healthcare/go_Dataset.yaml @@ -0,0 +1,99 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Dataset' +kind: 'healthcare#dataset' +description: | + A Healthcare `Dataset` is a toplevel logical grouping of `dicomStores`, `fhirStores` and `hl7V2Stores`. 
+references: + guides: + 'Creating a dataset': 'https://cloud.google.com/healthcare/docs/how-tos/datasets' + api: 'https://cloud.google.com/healthcare/docs/reference/rest/v1/projects.locations.datasets' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/datasets/{{name}}' +base_url: 'projects/{{project}}/locations/{{location}}/datasets?datasetId={{name}}' +self_link: 'projects/{{project}}/locations/{{location}}/datasets/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/datasets/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + decoder: 'templates/terraform/decoders/go/long_name_to_self_link.go.tmpl' +error_retry_predicates: + + - 'transport_tpg.HealthcareDatasetNotInitialized' +examples: + - name: 'healthcare_dataset_basic' + primary_resource_id: 'default' + vars: + dataset_name: 'example-dataset' + location: 'us-central1' + time_zone: 'America/New_York' + - name: 'healthcare_dataset_cmek' + primary_resource_id: 'default' + vars: + dataset_name: 'example-dataset' + location: 'us-central1' + time_zone: 'America/New_York' + key_name: 'example-key' + keyring_name: 'example-keyring' +parameters: + - name: 'location' + type: String + description: | + The location for the Dataset. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name for the Dataset. + required: true + immutable: true + - name: 'timeZone' + type: String + description: | + The default timezone used by this dataset. Must be a either a valid IANA time zone name such as + "America/New_York" or empty, which defaults to UTC. This is used for parsing times in resources + (e.g., HL7 messages) where no explicit timezone is specified. 
+ required: false + default_from_api: true + - name: 'selfLink' + type: String + description: | + The fully qualified name of this dataset + ignore_read: true + output: true + - name: 'encryptionSpec' + type: NestedObject + required: false + immutable: true + default_from_api: true + properties: + - name: 'kmsKeyName' + type: String + description: | + KMS encryption key that is used to secure this dataset and its sub-resources. The key used for + encryption and the dataset must be in the same location. If empty, the default Google encryption + key will be used to secure this dataset. The format is + projects/{projectId}/locations/{locationId}/keyRings/{keyRingId}/cryptoKeys/{keyId}. + required: false + immutable: true diff --git a/mmv1/products/healthcare/go_DicomStore.yaml b/mmv1/products/healthcare/go_DicomStore.yaml new file mode 100644 index 000000000000..c0d3bf6c2991 --- /dev/null +++ b/mmv1/products/healthcare/go_DicomStore.yaml @@ -0,0 +1,141 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DicomStore' +kind: 'healthcare#dicomStore' +description: | + A DicomStore is a datastore inside a Healthcare dataset that conforms to the DICOM + (https://www.dicomstandard.org/about/) standard for Healthcare information exchange +references: + guides: + 'Creating a DICOM store': 'https://cloud.google.com/healthcare/docs/how-tos/dicom' + api: 'https://cloud.google.com/healthcare/docs/reference/rest/v1/projects.locations.datasets.dicomStores' +docs: +id_format: '{{dataset}}/dicomStores/{{name}}' +base_url: '{{dataset}}/dicomStores?dicomStoreId={{name}}' +self_link: '{{dataset}}/dicomStores/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{dataset}}/dicomStores/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + decoder: 'templates/terraform/decoders/go/long_name_to_self_link.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/healthcare_dicom_store.go.tmpl' +exclude_sweeper: true +examples: + - name: 'healthcare_dicom_store_basic' + primary_resource_id: 'default' + vars: + dataset_name: 'example-dataset' + dicom_store_name: 'example-dicom-store' + pubsub_topic: 'dicom-notifications' + - name: 'healthcare_dicom_store_bq_stream' + primary_resource_id: 'default' + min_version: 'beta' + vars: + dataset_name: 'example-dataset' + dicom_store_name: 'example-dicom-store' + pubsub_topic: 'dicom-notifications' + bq_dataset_name: 'dicom_bq_ds' + bq_table_name: 'dicom_bq_tb' + test_vars_overrides: + 'policyChanged': ' acctest.BootstrapPSARoles(t, "service-", "gcp-sa-healthcare", []string{"roles/bigquery.dataEditor", "roles/bigquery.jobUser"})' +parameters: + - name: 'dataset' + type: ResourceRef + description: | + Identifies the dataset addressed by this request. 
Must be in the format + 'projects/{project}/locations/{location}/datasets/{dataset}' + url_param_only: true + required: true + immutable: true + resource: 'Dataset' + imports: 'selfLink' +properties: + - name: 'name' + type: String + description: | + The resource name for the DicomStore. + + ** Changing this property may recreate the Dicom store (removing all data) ** + required: true + immutable: true + - name: 'labels' + type: KeyValueLabels + description: | + User-supplied key-value pairs used to organize DICOM stores. + + Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must + conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 + bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 64 labels can be associated with a given store. + + An object containing a list of "key": value pairs. + Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + + required: false + - name: 'notificationConfig' + type: NestedObject + required: false + properties: + - name: 'pubsubTopic' + type: String + description: | + The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. + PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. + It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message + was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a + project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given + Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. 
+ required: true + - name: 'sendForBulkImport' + type: Boolean + description: | + Indicates whether or not to send Pub/Sub notifications on bulk import. Only supported for DICOM imports. + required: false + - name: 'selfLink' + type: String + description: | + The fully qualified name of this dataset + ignore_read: true + output: true + - name: 'streamConfigs' + type: Array + description: | + To enable streaming to BigQuery, configure the streamConfigs object in your DICOM store. + streamConfigs is an array, so you can specify multiple BigQuery destinations. You can stream metadata from a single DICOM store to up to five BigQuery tables in a BigQuery dataset. + min_version: 'beta' + required: false + item_type: + type: NestedObject + properties: + - name: 'bigqueryDestination' + type: NestedObject + description: | + BigQueryDestination to include a fully qualified BigQuery table URI where DICOM instance metadata will be streamed. + required: true + properties: + - name: 'tableUri' + type: String + description: | + a fully qualified BigQuery table URI where DICOM instance metadata will be streamed. + required: true diff --git a/mmv1/products/healthcare/go_FhirStore.yaml b/mmv1/products/healthcare/go_FhirStore.yaml new file mode 100644 index 000000000000..09df23de954c --- /dev/null +++ b/mmv1/products/healthcare/go_FhirStore.yaml @@ -0,0 +1,335 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FhirStore' +kind: 'healthcare#fhirStore' +description: | + A FhirStore is a datastore inside a Healthcare dataset that conforms to the FHIR (https://www.hl7.org/fhir/STU3/) + standard for Healthcare information exchange +references: + guides: + 'Creating a FHIR store': 'https://cloud.google.com/healthcare/docs/how-tos/fhir' + api: 'https://cloud.google.com/healthcare/docs/reference/rest/v1/projects.locations.datasets.fhirStores' +docs: +id_format: '{{dataset}}/fhirStores/{{name}}' +base_url: '{{dataset}}/fhirStores?fhirStoreId={{name}}' +self_link: '{{dataset}}/fhirStores/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{dataset}}/fhirStores/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + decoder: 'templates/terraform/decoders/go/long_name_to_self_link.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/healthcare_fhir_store.go.tmpl' +exclude_sweeper: true +examples: + - name: 'healthcare_fhir_store_basic' + primary_resource_id: 'default' + vars: + dataset_name: 'example-dataset' + fhir_store_name: 'example-fhir-store' + pubsub_topic: 'fhir-notifications' + - name: 'healthcare_fhir_store_streaming_config' + primary_resource_id: 'default' + vars: + dataset_name: 'example-dataset' + fhir_store_name: 'example-fhir-store' + pubsub_topic: 'fhir-notifications' + bq_dataset_name: 'bq_example_dataset' + test_vars_overrides: + 'policyChanged': ' acctest.BootstrapPSARoles(t, "service-", "gcp-sa-healthcare", []string{"roles/bigquery.dataEditor", "roles/bigquery.jobUser"})' + - name: 'healthcare_fhir_store_notification_config' + primary_resource_id: 'default' + vars: + dataset_name: 'example-dataset' + fhir_store_name: 'example-fhir-store' + pubsub_topic: 'fhir-notifications' + exclude_docs: true + - name: 'healthcare_fhir_store_notification_configs' + primary_resource_id: 'default' + vars: + 
dataset_name: 'example-dataset' + fhir_store_name: 'example-fhir-store' + pubsub_topic: 'fhir-notifications' +parameters: + - name: 'dataset' + type: ResourceRef + description: | + Identifies the dataset addressed by this request. Must be in the format + 'projects/{project}/locations/{location}/datasets/{dataset}' + url_param_only: true + required: true + immutable: true + resource: 'Dataset' + imports: 'selfLink' +properties: + - name: 'name' + type: String + description: | + The resource name for the FhirStore. + + ** Changing this property may recreate the FHIR store (removing all data) ** + required: true + immutable: true + # Version is duplicated because it is optional in beta but required in GA. + - name: 'version' + type: Enum + description: | + The FHIR specification version. + exact_version: 'beta' + required: false + immutable: true + default_value: "STU3" + enum_values: + - 'DSTU2' + - 'STU3' + - 'R4' + - name: 'version' + type: Enum + description: | + The FHIR specification version. + exact_version: ga + required: true + immutable: true + enum_values: + - 'DSTU2' + - 'STU3' + - 'R4' + - name: 'complexDataTypeReferenceParsing' + type: Enum + description: | + Enable parsing of references within complex FHIR data types such as Extensions. If this value is set to ENABLED, then features like referential integrity and Bundle reference rewriting apply to all references. If this flag has not been specified the behavior of the FHIR store will not change, references in complex data types will not be parsed. New stores will have this value set to ENABLED by default after a notification period. Warning: turning on this flag causes processing existing resources to fail if they contain references to non-existent resources. + default_from_api: true + enum_values: + - 'COMPLEX_DATA_TYPE_REFERENCE_PARSING_UNSPECIFIED' + - 'DISABLED' + - 'ENABLED' + - name: 'enableUpdateCreate' + type: Boolean + description: | + Whether this FHIR store has the updateCreate capability. 
This determines if the client can use an Update + operation to create a new resource with a client-specified ID. If false, all IDs are server-assigned through + the Create operation and attempts to Update a non-existent resource will return errors. Please treat the audit + logs with appropriate levels of care if client-specified resource IDs contain sensitive data such as patient + identifiers, those IDs will be part of the FHIR resource path recorded in Cloud audit logs and Cloud Pub/Sub + notifications. + required: false + - name: 'disableReferentialIntegrity' + type: Boolean + description: | + Whether to disable referential integrity in this FHIR store. This field is immutable after FHIR store + creation. The default value is false, meaning that the API will enforce referential integrity and fail the + requests that will result in inconsistent state in the FHIR store. When this field is set to true, the API + will skip referential integrity check. Consequently, operations that rely on references, such as + Patient.get$everything, will not return all the results if broken references exist. + + ** Changing this property may recreate the FHIR store (removing all data) ** + required: false + immutable: true + - name: 'disableResourceVersioning' + type: Boolean + description: | + Whether to disable resource versioning for this FHIR store. This field can not be changed after the creation + of FHIR store. If set to false, which is the default behavior, all write operations will cause historical + versions to be recorded automatically. The historical versions can be fetched through the history APIs, but + cannot be updated. If set to true, no historical versions will be kept. The server will send back errors for + attempts to read the historical versions. 
+ + ** Changing this property may recreate the FHIR store (removing all data) ** + required: false + immutable: true + - name: 'enableHistoryImport' + type: Boolean + description: | + Whether to allow the bulk import API to accept history bundles and directly insert historical resource + versions into the FHIR store. Importing resource histories creates resource interactions that appear to have + occurred in the past, which clients may not want to allow. If set to false, history bundles within an import + will fail with an error. + + ** Changing this property may recreate the FHIR store (removing all data) ** + + ** This property can be changed manually in the Google Cloud Healthcare admin console without recreating the FHIR store ** + required: false + immutable: true + - name: 'enableHistoryModifications' + type: Boolean + description: | + Whether to allow the ExecuteBundle API to accept history bundles, and directly insert and overwrite historical + resource versions into the FHIR store. If set to false, using history bundles fails with an error. + min_version: 'beta' + required: false + - name: 'labels' + type: KeyValueLabels + description: | + User-supplied key-value pairs used to organize FHIR stores. + + Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must + conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 + bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 64 labels can be associated with a given store. + + An object containing a list of "key": value pairs. + Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
+ + required: false + - name: 'notificationConfig' + type: NestedObject + required: false + deprecation_message: '`notification_config` is deprecated and will be removed in a future major release. Use `notification_configs` instead.' + properties: + - name: 'pubsubTopic' + type: String + description: | + The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. + PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. + It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message + was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a + project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given + Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. + required: true + - name: 'selfLink' + type: String + description: | + The fully qualified name of this dataset + ignore_read: true + output: true + - name: 'streamConfigs' + type: Array + description: |- + A list of streaming configs that configure the destinations of streaming export for every resource mutation in + this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next + resource mutation is streamed to the new location in addition to the existing ones. When a location is removed + from the list, the server stops streaming to that location. Before adding a new config, you must add the required + bigquery.dataEditor role to your project's Cloud Healthcare Service Agent service account. Some lag (typically on + the order of dozens of seconds) is expected before the results show up in the streaming destination. 
+ item_type: + type: NestedObject + properties: + - name: 'resourceTypes' + type: Array + description: | + Supply a FHIR resource type (such as "Patient" or "Observation"). See + https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. The server treats + an empty list as an intent to stream all the supported resource types in this FHIR store. + item_type: + type: String + - name: 'bigqueryDestination' + type: NestedObject + description: | + The destination BigQuery structure that contains both the dataset location and corresponding schema config. + The output is organized in one table per resource type. The server reuses the existing tables (if any) that + are named after the resource types, e.g. "Patient", "Observation". When there is no existing table for a given + resource type, the server attempts to create one. + See the [streaming config reference](https://cloud.google.com/healthcare/docs/reference/rest/v1beta1/projects.locations.datasets.fhirStores#streamconfig) for more details. + required: true + properties: + - name: 'datasetUri' + type: String + description: | + BigQuery URI to a dataset, up to 2000 characters long, in the format bq://projectId.bqDatasetId + required: true + - name: 'schemaConfig' + type: NestedObject + description: | + The configuration for the exported BigQuery schema. + required: true + properties: + - name: 'schemaType' + type: Enum + description: | + Specifies the output schema type. + * ANALYTICS: Analytics schema defined by the FHIR community. + See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. + * ANALYTICS_V2: Analytics V2, similar to schema defined by the FHIR community, with added support for extensions with one or more occurrences and contained resources in stringified JSON. + * LOSSLESS: A data-driven schema generated from the fields present in the FHIR data being exported, with no additional simplification. 
+                  default_value: "ANALYTICS"
+                  enum_values:
+                    - 'ANALYTICS'
+                    - 'ANALYTICS_V2'
+                    - 'LOSSLESS'
+                - name: 'recursiveStructureDepth'
+                  type: Integer
+                  description: |
+                    The depth for all recursive structures in the output analytics schema. For example, concept in the CodeSystem
+                    resource is a recursive structure; when the depth is 2, the CodeSystem table will have a column called
+                    concept.concept but not concept.concept.concept. If not specified or set to 0, the server will use the default
+                    value 2. The maximum depth allowed is 5.
+                  required: true
+                - name: 'lastUpdatedPartitionConfig'
+                  type: NestedObject
+                  description: |
+                    The configuration for exported BigQuery tables to be partitioned by FHIR resource's last updated time column.
+                  properties:
+                    - name: 'type'
+                      type: Enum
+                      description: |
+                        Type of partitioning.
+                      required: true
+                      enum_values:
+                        - 'PARTITION_TYPE_UNSPECIFIED'
+                        - 'HOUR'
+                        - 'DAY'
+                        - 'MONTH'
+                        - 'YEAR'
+                    - name: 'expirationMs'
+                      type: String
+                      description: |
+                        Number of milliseconds for which to keep the storage for a partition.
+  - name: 'defaultSearchHandlingStrict'
+    type: Boolean
+    description: |
+      If true, overrides the default search behavior for this FHIR store to handling=strict which returns an error for unrecognized search parameters.
+      If false, uses the FHIR specification default handling=lenient which ignores unrecognized search parameters.
+      The handling can always be changed from the default on an individual API call by setting the HTTP header Prefer: handling=strict or Prefer: handling=lenient.
+  - name: 'notificationConfigs'
+    type: Array
+    description: |-
+      A list of notification configs that configure the notification for every resource mutation in this FHIR store.
+    item_type:
+      type: NestedObject
+      properties:
+        - name: 'pubsubTopic'
+          type: String
+          description: |
+            The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client.
+            PubsubMessage.Data will contain the resource name. 
PubsubMessage.MessageId is the ID of this message. + It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message + was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a + project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given + Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. + required: true + - name: 'sendFullResource' + type: Boolean + description: | + Whether to send full FHIR resource to this Pub/Sub topic for Create and Update operation. + Note that setting this to true does not guarantee that all resources will be sent in the format of + full FHIR resource. When a resource change is too large or during heavy traffic, only the resource name will be + sent. Clients should always check the "payloadType" label from a Pub/Sub message to determine whether + it needs to fetch the full resource as a separate operation. + - name: 'sendPreviousResourceOnDelete' + type: Boolean + description: | + Whether to send full FHIR resource to this Pub/Sub topic for deleting FHIR resource. Note that setting this to + true does not guarantee that all previous resources will be sent in the format of full FHIR resource. When a + resource change is too large or during heavy traffic, only the resource name will be sent. Clients should always + check the "payloadType" label from a Pub/Sub message to determine whether it needs to fetch the full previous + resource as a separate operation. diff --git a/mmv1/products/healthcare/go_Hl7V2Store.yaml b/mmv1/products/healthcare/go_Hl7V2Store.yaml new file mode 100644 index 000000000000..fdccead7323f --- /dev/null +++ b/mmv1/products/healthcare/go_Hl7V2Store.yaml @@ -0,0 +1,228 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Hl7V2Store' +kind: 'healthcare#hl7V2Store' +description: | + A Hl7V2Store is a datastore inside a Healthcare dataset that conforms to the FHIR (https://www.hl7.org/hl7V2/STU3/) + standard for Healthcare information exchange +references: + guides: + 'Creating a HL7v2 Store': 'https://cloud.google.com/healthcare/docs/how-tos/hl7v2' + api: 'https://cloud.google.com/healthcare/docs/reference/rest/v1/projects.locations.datasets.hl7V2Stores' +docs: +id_format: '{{dataset}}/hl7V2Stores/{{name}}' +base_url: '{{dataset}}/hl7V2Stores?hl7V2StoreId={{name}}' +self_link: '{{dataset}}/hl7V2Stores/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{dataset}}/hl7V2Stores/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + decoder: 'templates/terraform/decoders/go/long_name_to_self_link.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/healthcare_hl7_v2_store.go.tmpl' +exclude_sweeper: true +examples: + - name: 'healthcare_hl7_v2_store_basic' + primary_resource_id: 'store' + vars: + dataset_name: 'example-dataset' + hl7_v2_store_name: 'example-hl7-v2-store' + pubsub_topic: 'hl7-v2-notifications' + - name: 'healthcare_hl7_v2_store_parser_config' + primary_resource_id: 'store' + min_version: 'beta' + vars: + dataset_name: 'example-dataset' + hl7_v2_store_name: 
'example-hl7-v2-store' + - name: 'healthcare_hl7_v2_store_unschematized' + primary_resource_id: 'store' + min_version: 'beta' + vars: + dataset_name: 'example-dataset' + hl7_v2_store_name: 'example-hl7-v2-store' +parameters: + - name: 'dataset' + type: ResourceRef + description: | + Identifies the dataset addressed by this request. Must be in the format + 'projects/{project}/locations/{location}/datasets/{dataset}' + url_param_only: true + required: true + immutable: true + resource: 'Dataset' + imports: 'selfLink' +properties: + - name: 'name' + type: String + description: | + The resource name for the Hl7V2Store. + + ** Changing this property may recreate the Hl7v2 store (removing all data) ** + required: true + immutable: true + - name: 'rejectDuplicateMessage' + type: Boolean + description: | + Determines whether duplicate messages are allowed. + required: false + default_value: false + - name: 'parserConfig' + type: NestedObject + required: false + default_from_api: true + update_mask_fields: + - 'parser_config.allow_null_header' + - 'parser_config.segment_terminator' + - 'parser_config.schema' + properties: + - name: 'allowNullHeader' + type: Boolean + description: | + Determines whether messages with no header are allowed. + at_least_one_of: + - 'parser_config.0.allow_null_header' + - 'parser_config.0.segment_terminator' + - 'parser_config.0.schema' + - name: 'segmentTerminator' + type: String + description: | + Byte(s) to be used as the segment terminator. If this is unset, '\r' will be used as segment terminator. + + A base64-encoded string. + at_least_one_of: + - 'parser_config.0.allow_null_header' + - 'parser_config.0.segment_terminator' + - 'parser_config.0.schema' + validation: + function: 'verify.ValidateBase64String' + - name: 'schema' + type: String + description: | + JSON encoded string for schemas used to parse messages in this + store if schematized parsing is desired. 
+ at_least_one_of: + - 'parser_config.0.allow_null_header' + - 'parser_config.0.segment_terminator' + - 'parser_config.0.schema' + - 'parser_config.0.version' + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'version' + type: Enum + description: | + The version of the unschematized parser to be used when a custom `schema` is not set. + immutable: true + default_value: "V1" + enum_values: + - 'V1' + - 'V2' + - 'V3' + - name: 'labels' + type: KeyValueLabels + description: | + User-supplied key-value pairs used to organize HL7v2 stores. + + Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must + conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 + bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 64 labels can be associated with a given store. + + An object containing a list of "key": value pairs. + Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + required: false + - name: 'notificationConfigs' + type: Array + description: |- + A list of notification configs. Each configuration uses a filter to determine whether to publish a + message (both Ingest & Create) on the corresponding notification destination. Only the message name + is sent as part of the notification. Supplied by the client. + item_type: + type: NestedObject + properties: + - name: 'pubsubTopic' + type: String + description: | + The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. + PubsubMessage.Data will contain the resource name. 
PubsubMessage.MessageId is the ID of this message. + It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message + was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a + project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given + Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. + + If a notification cannot be published to Cloud Pub/Sub, errors will be logged to Stackdriver + required: true + - name: 'filter' + type: String + description: | + Restricts notifications sent for messages matching a filter. If this is empty, all messages + are matched. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings + + Fields/functions available for filtering are: + + * messageType, from the MSH-9.1 field. For example, NOT messageType = "ADT". + * send_date or sendDate, the YYYY-MM-DD date the message was sent in the dataset's timeZone, from the MSH-7 segment. For example, send_date < "2017-01-02". + * sendTime, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, sendTime < "2017-01-02T00:00:00-05:00". + * sendFacility, the care center that the message came from, from the MSH-4 segment. For example, sendFacility = "ABC". + * PatientId(value, type), which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, PatientId("123456", "MRN"). + * labels.x, a string value of the label with key x as set using the Message.labels map. For example, labels."priority"="high". The operator :* can be used to assert the existence of a label. For example, labels."priority":*. 
+  - name: 'notificationConfig'
+    type: NestedObject
+    removed_message: This field has been replaced by notificationConfigs
+    exact_version: ga
+    required: false
+    properties:
+      - name: 'pubsubTopic'
+        type: String
+        description: |
+          The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client.
+          PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message.
+          It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message
+          was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a
+          project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given
+          Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.
+        required: true
+  - name: 'notificationConfig'
+    type: NestedObject
+    exact_version: 'beta'
+    required: false
+    deprecation_message: '`notification_config` is deprecated and will be removed in a future major release. Use `notification_configs` instead.'
+    properties:
+      - name: 'pubsubTopic'
+        type: String
+        description: |
+          The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client.
+          PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message.
+          It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message
+          was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a
+          project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given
+          Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. 
+ required: true + - name: 'selfLink' + type: String + description: | + The fully qualified name of this dataset + ignore_read: true + output: true diff --git a/mmv1/products/healthcare/go_product.yaml b/mmv1/products/healthcare/go_product.yaml new file mode 100644 index 000000000000..f05789f111e2 --- /dev/null +++ b/mmv1/products/healthcare/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Healthcare' +display_name: 'Cloud Healthcare' +versions: + - name: 'ga' + base_url: 'https://healthcare.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://healthcare.googleapis.com/v1beta1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' From c2e2bf70c45e1afe0d214bca4eb51ee03507a09c Mon Sep 17 00:00:00 2001 From: karolgorc Date: Fri, 6 Sep 2024 22:05:49 +0200 Subject: [PATCH 57/60] Add disk resource policies field to the compute instance (#11527) --- .../compute/resource_compute_instance.go.erb | 17 ++ ...urce_compute_instance_from_template.go.erb | 9 + ...compute_instance_from_template_test.go.erb | 188 ++++++++++++++++++ .../resource_compute_instance_test.go.erb | 186 +++++++++++++++++ .../docs/d/compute_instance.html.markdown | 2 + .../docs/r/compute_instance.html.markdown | 2 + 6 files changed, 404 insertions(+) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb 
b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index cfcfda8bb61c..8d8430aa929c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -74,6 +74,7 @@ var ( "boot_disk.0.initialize_params.0.provisioned_throughput", "boot_disk.0.initialize_params.0.enable_confidential_compute", "boot_disk.0.initialize_params.0.storage_pool", + "boot_disk.0.initialize_params.0.resource_policies", } schedulingKeys = []string{ @@ -310,6 +311,17 @@ func ResourceComputeInstance() *schema.Resource { Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, }, + "resource_policies": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ForceNew: true, + AtLeastOneOf: initializeParamsKeys, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + MaxItems: 1, + Description: `A list of self_links of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate. 
Currently a max of 1 resource policy is supported.`, + }, + "provisioned_iops": { Type: schema.TypeInt, Optional: true, @@ -3049,6 +3061,10 @@ func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, projec disk.InitializeParams.ResourceManagerTags = tpgresource.ExpandStringMap(d, "boot_disk.0.initialize_params.0.resource_manager_tags") } + if _, ok := d.GetOk("boot_disk.0.initialize_params.0.resource_policies"); ok { + disk.InitializeParams.ResourcePolicies = tpgresource.ConvertStringArr(d.Get("boot_disk.0.initialize_params.0.resource_policies").([]interface{})) + } + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.storage_pool"); ok { disk.InitializeParams.StoragePool = v.(string) } @@ -3094,6 +3110,7 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config "size": diskDetails.SizeGb, "labels": diskDetails.Labels, "resource_manager_tags": d.Get("boot_disk.0.initialize_params.0.resource_manager_tags"), + "resource_policies": diskDetails.ResourcePolicies, "provisioned_iops": diskDetails.ProvisionedIops, "provisioned_throughput": diskDetails.ProvisionedThroughput, "enable_confidential_compute": diskDetails.EnableConfidentialCompute, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template.go.erb index 272e9f9fc73a..2e77a70b3b48 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template.go.erb @@ -251,6 +251,15 @@ func adjustInstanceFromTemplateDisks(d *schema.ResourceData, config *transport_t // only have the name (since they're global). 
disk.InitializeParams.DiskType = fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) } + if rp := disk.InitializeParams.ResourcePolicies; len(rp) > 0 { + // Instances need a URL for the resource policy, but instance templates + // only have the name (since they're global). + for i := range rp { + rp[i], _ = parseUniqueId(rp[i]) // in some cases the API translation doesn't work and returns entire url when only name is provided. And allows for id to be passed as well + rp[i] = fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, regionFromUrl(zone.Region), rp[i]) + } + disk.InitializeParams.ResourcePolicies = rp + } } disks = append(disks, disk) break diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb index 69e7383c0100..eadd40e1cbe8 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb @@ -169,6 +169,38 @@ func TestAccComputeInstanceFromTemplateWithOverride_localSsdRecoveryTimeout(t *t }) } +func TestAccComputeInstanceFromTemplate_diskResourcePolicies(t *testing.T) { + t.Parallel() + + var instance compute.Instance + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_diskResourcePoliciesCreate(suffix, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.foobar", &instance), + ), + }, + { + Config: 
testAccComputeInstanceFromTemplate_diskResourcePoliciesUpdate(suffix, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance_from_template.foobar", &instance), + ), + }, + { + Config: testAccComputeInstanceFromTemplate_diskResourcePoliciesTwoPolicies(suffix, templateName), + ExpectError: regexp.MustCompile("Too many list items"), + }, + }, + }) +} + <% unless version == 'ga' -%> func TestAccComputeInstanceFromTemplate_partnerMetadata(t *testing.T) { t.Parallel() @@ -1857,3 +1889,159 @@ resource "google_compute_instance_from_template" "foobar" { } `, template, instance, template, instance) } + +func testAccComputeInstanceFromTemplate_diskResourcePoliciesCreate(suffix, template string) string { + return fmt.Sprintf(` +resource "google_compute_resource_policy" "test-snapshot-policy" { + name = "test-policy-%s" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "11:00" + } + } + } +} + +resource "google_compute_resource_policy" "test-snapshot-policy2" { + name = "test-policy2-%s" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "22:00" + } + } + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "%s" + region = "us-central1" + machine_type = "n1-standard-1" + disk { + resource_policies = [ google_compute_resource_policy.test-snapshot-policy.name ] + source_image = data.google_compute_image.my_image.self_link + } + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + source_instance_template = google_compute_region_instance_template.foobar.id +} +`, suffix, suffix, template, template) +} + +func testAccComputeInstanceFromTemplate_diskResourcePoliciesUpdate(suffix, template string) string { + 
return fmt.Sprintf(` +resource "google_compute_resource_policy" "test-snapshot-policy" { + name = "test-policy-%s" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "11:00" + } + } + } +} + +resource "google_compute_resource_policy" "test-snapshot-policy2" { + name = "test-policy2-%s" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "22:00" + } + } + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "%s" + region = "us-central1" + machine_type = "n1-standard-1" + disk { + resource_policies = [ google_compute_resource_policy.test-snapshot-policy2.name ] + source_image = data.google_compute_image.my_image.self_link + } + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + source_instance_template = google_compute_region_instance_template.foobar.id +} +`, suffix, suffix, template, template) +} + +func testAccComputeInstanceFromTemplate_diskResourcePoliciesTwoPolicies(suffix, template string) string { + return fmt.Sprintf(` +resource "google_compute_resource_policy" "test-snapshot-policy" { + name = "test-policy-%s" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "11:00" + } + } + } +} + +resource "google_compute_resource_policy" "test-snapshot-policy2" { + name = "test-policy2-%s" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "22:00" + } + } + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "%s" + region = "us-central1" + machine_type = "n1-standard-1" + disk { + resource_policies = [ 
google_compute_resource_policy.test-snapshot-policy.name, google_compute_resource_policy.test-snapshot-policy2.name ] + source_image = data.google_compute_image.my_image.self_link + } + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + source_instance_template = google_compute_region_instance_template.foobar.id +} + `, suffix, suffix, template, template) +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index 1a85326f6392..5813f8195f36 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -337,6 +337,42 @@ func TestAccComputeInstance_resourceManagerTags(t *testing.T) { }) } +func TestAccComputeInstance_diskResourcePolicies(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "instance_name": instanceName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_diskResourcePoliciesOnePolicy(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: testAccComputeInstance_diskResourcePoliciesOnePolicyUpdate(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: 
testAccComputeInstance_diskResourcePoliciesTwoPolicies(context), + ExpectError: regexp.MustCompile("Too many list items"), + }, + }, + }) +} + func TestAccComputeInstance_machineTypeUrl(t *testing.T) { t.Parallel() @@ -4983,6 +5019,156 @@ resource "google_compute_instance" "foobar" { `, context) } +func testAccComputeInstance_diskResourcePoliciesOnePolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_resource_policy" "test-snapshot-policy" { + name = "test-policy-%{random_suffix}" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "11:00" + } + } + } +} + +resource "google_compute_resource_policy" "test-snapshot-policy2" { + name = "test-policy2-%{random_suffix}" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "22:00" + } + } + } +} + +resource "google_compute_instance" "foobar" { + name = "%{instance_name}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + resource_policies = [google_compute_resource_policy.test-snapshot-policy.id] + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, context) +} + +func testAccComputeInstance_diskResourcePoliciesOnePolicyUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_resource_policy" "test-snapshot-policy" { + name = "test-policy-%{random_suffix}" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "11:00" + } + } + } +} + +resource "google_compute_resource_policy" "test-snapshot-policy2" { + name = "test-policy2-%{random_suffix}" + snapshot_schedule_policy { + schedule { + hourly_schedule { 
+ hours_in_cycle = 1 + start_time = "22:00" + } + } + } +} + +resource "google_compute_instance" "foobar" { + name = "%{instance_name}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + resource_policies = [google_compute_resource_policy.test-snapshot-policy2.id] + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, context) +} + +func testAccComputeInstance_diskResourcePoliciesTwoPolicies(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_resource_policy" "test-snapshot-policy" { + name = "test-policy-%{random_suffix}" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "11:00" + } + } + } +} + +resource "google_compute_resource_policy" "test-snapshot-policy2" { + name = "test-policy2-%{random_suffix}" + snapshot_schedule_policy { + schedule { + hourly_schedule { + hours_in_cycle = 1 + start_time = "22:00" + } + } + } +} + +resource "google_compute_instance" "foobar" { + name = "%{instance_name}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + resource_policies = [google_compute_resource_policy.test-snapshot-policy2.id, google_compute_resource_policy.test-snapshot-policy.id] + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, context) +} + func testAccComputeInstance_basic_deletionProtectionFalse(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git a/mmv1/third_party/terraform/website/docs/d/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_instance.html.markdown index c52917b0fef5..c30d589a75d3 100644 --- a/mmv1/third_party/terraform/website/docs/d/compute_instance.html.markdown +++ 
b/mmv1/third_party/terraform/website/docs/d/compute_instance.html.markdown @@ -131,6 +131,8 @@ The following arguments are supported: * `labels` - A set of key/value label pairs assigned to the disk. +* `resource_policies` - A list of self_links to resource policies attached to the selected `boot_disk` + The `scratch_disk` block supports: * `interface` - The disk interface used for attaching this disk. One of `SCSI` or `NVME`. diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index 20bd5f0262bf..2b931df4845a 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -302,6 +302,8 @@ is desired, you will need to modify your state file manually using * `resource_manager_tags` - (Optional) A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource. +* `resource_policies` - (Optional) A list of self_links of resource policies to attach to the instance's boot disk. Modifying this list will cause the instance to recreate. Currently a max of 1 resource policy is supported. + * `provisioned_iops` - (Optional) Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. For more details,see the [Hyperdisk documentation](https://cloud.google.com/compute/docs/disks/hyperdisks). 
From a38f39db43aff4085145e0934910a29e29ae8852 Mon Sep 17 00:00:00 2001 From: Thomas Hutcheson <79571523+thomashutcheson-msm@users.noreply.github.com> Date: Fri, 6 Sep 2024 22:17:00 +0100 Subject: [PATCH 58/60] update datastream desired_state description (#7636) Co-authored-by: Thomas Rodgers --- mmv1/products/datastream/Stream.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mmv1/products/datastream/Stream.yaml b/mmv1/products/datastream/Stream.yaml index 419080af8ed7..48a4373e9373 100644 --- a/mmv1/products/datastream/Stream.yaml +++ b/mmv1/products/datastream/Stream.yaml @@ -31,7 +31,9 @@ virtual_fields: - !ruby/object:Api::Type::String name: 'desired_state' description: | - Desired state of the Stream. Set this field to `RUNNING` to start the stream, and `PAUSED` to pause the stream. + Desired state of the Stream. Set this field to `RUNNING` to start the stream, + `NOT_STARTED` to create the stream without starting and `PAUSED` to pause + the stream from a `RUNNING` state. Possible values: NOT_STARTED, RUNNING, PAUSED. 
Default: NOT_STARTED default_value: NOT_STARTED custom_code: !ruby/object:Provider::Terraform::CustomCode From 4bcf068f4aa20cd0aaf93306833fd956d8518bde Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Fri, 6 Sep 2024 17:18:17 -0400 Subject: [PATCH 59/60] Update and fix CI setup instructions (#11658) --- .ci/infra/terraform/README.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.ci/infra/terraform/README.md b/.ci/infra/terraform/README.md index fb1898e47119..0da75e6d6ee3 100644 --- a/.ci/infra/terraform/README.md +++ b/.ci/infra/terraform/README.md @@ -15,7 +15,6 @@ After applying this configuration: - (Internal only) Enable stubbed calls for GKE MultiCloud resources - (Internal only) Verify ownership of `hashicorptest.com` for new service account - Enable Media CDN -- Enable Game Services - Enable Access Boundary permissions - Enable BigQuery Table IAM conditions - Deploy "Hello World" app: https://cloud.google.com/appengine/docs/flexible/go/create-app @@ -24,14 +23,14 @@ After applying this configuration: git clone https://github.com/GoogleCloudPlatform/golang-samples cp -r golang-samples/appengine_flexible/helloworld ./. cd helloworld - gcloud app deploy --project= + gcloud app deploy --project= --billing-project= ``` - Create repo for "Hello World" function: https://cloud.google.com/source-repositories/docs/deploy-cloud-functions-version-control ``` gcloud source repos create cloudfunctions-test-do-not-delete --project= gcloud source repos clone cloudfunctions-test-do-not-delete --project= cd cloudfunctions-test-do-not-delete - curl https://raw.githubusercontent.com/GoogleCloudPlatform/magic-modules/main/mmv1/third_party/terraform/utils/test-fixtures/cloudfunctions/http_trigger.s > index.js + curl https://raw.githubusercontent.com/GoogleCloudPlatform/magic-modules/main/mmv1/third_party/terraform/services/cloudfunctions/test-fixtures/http_trigger.js > index.js git add . 
git commit -m "Initial commit" git push origin main @@ -40,8 +39,8 @@ After applying this configuration: ``` - Enable Multi-Tenancy ``` - curl --header "Authorization: Bearer $(gcloud auth print-access-token -q)" --header "X-Goog-User-Project: " -X POST https://identitytoolkit.oogleapis.com/v2/projects//identityPlatform:initializeAuth - curl --header "Content-Type: application/json" --header "Authorization: Bearer $(gcloud auth print-access-token -q)" --header "X-Goog-User-Project: project>" -X PATCH https://identitytoolkit.googleapis.com/admin/v2/projects//config?updateMask=multiTenant -d '{"multiTenant": {"allowTenants": rue}}' + curl --header "Authorization: Bearer $(gcloud auth print-access-token -q)" --header "X-Goog-User-Project: " -X POST https://identitytoolkit.googleapis.com/v2/projects//identityPlatform:initializeAuth + curl --header "Content-Type: application/json" --header "Authorization: Bearer $(gcloud auth print-access-token -q)" --header "X-Goog-User-Project: " -X PATCH https://identitytoolkit.googleapis.com/admin/v2/projects//config?updateMask=multiTenant -d '{"multiTenant": {"allowTenants": true}}' ``` - Add Group Admin role to new service account in the Google Workspace Admin Console: https://admin.google.com/ac/roles - Add a new test user in the Google Workspace Admin Console: https://admin.google.com/ac/users From c6f408424389676e2bf06240280f04ec4b837b7e Mon Sep 17 00:00:00 2001 From: Huy Pham Date: Fri, 6 Sep 2024 14:44:34 -0700 Subject: [PATCH 60/60] feat(containerattached): Add security_posture_config (#11516) --- mmv1/products/containerattached/Cluster.yaml | 14 ++++++++++++++ .../container_attached_cluster_full.tf.erb | 3 +++ .../pre_update/containerattached_update.go.erb | 5 ++++- ...ource_container_attached_cluster_update_test.go | 9 +++++++++ 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/mmv1/products/containerattached/Cluster.yaml b/mmv1/products/containerattached/Cluster.yaml index bda776fabb04..69499b15d3b0 100644 --- 
a/mmv1/products/containerattached/Cluster.yaml +++ b/mmv1/products/containerattached/Cluster.yaml @@ -355,3 +355,17 @@ properties: required: true description: | Namespace of the kubernetes secret containing the proxy config. + - !ruby/object:Api::Type::NestedObject + name: securityPostureConfig + description: | + Enable/Disable Security Posture API features for the cluster. + default_from_api: true + properties: + - !ruby/object:Api::Type::Enum + name: vulnerabilityMode + required: true + description: | + Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. + values: + - :VULNERABILITY_DISABLED + - :VULNERABILITY_ENTERPRISE diff --git a/mmv1/templates/terraform/examples/container_attached_cluster_full.tf.erb b/mmv1/templates/terraform/examples/container_attached_cluster_full.tf.erb index 3d3fcf4eac8d..13c4c23858bc 100644 --- a/mmv1/templates/terraform/examples/container_attached_cluster_full.tf.erb +++ b/mmv1/templates/terraform/examples/container_attached_cluster_full.tf.erb @@ -46,4 +46,7 @@ resource "google_container_attached_cluster" "primary" { namespace = "default" } } + security_posture_config { + vulnerability_mode = "VULNERABILITY_ENTERPRISE" + } } diff --git a/mmv1/templates/terraform/pre_update/containerattached_update.go.erb b/mmv1/templates/terraform/pre_update/containerattached_update.go.erb index a9c403919777..f24227122e00 100644 --- a/mmv1/templates/terraform/pre_update/containerattached_update.go.erb +++ b/mmv1/templates/terraform/pre_update/containerattached_update.go.erb @@ -19,9 +19,12 @@ if d.HasChange("proxy_config") { newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.name") newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.namespace") } +if d.HasChange("security_posture_config") { + newUpdateMask = append(newUpdateMask, "security_posture_config.vulnerability_mode") +} // Pull out any other set fields from the generated mask. 
for _, mask := range updateMask { - if mask == "authorization" || mask == "loggingConfig" || mask == "monitoringConfig" || mask == "binaryAuthorization" || mask == "proxyConfig" { + if mask == "authorization" || mask == "loggingConfig" || mask == "monitoringConfig" || mask == "binaryAuthorization" || mask == "proxyConfig" || mask == "securityPostureConfig" { continue } newUpdateMask = append(newUpdateMask, mask) diff --git a/mmv1/third_party/terraform/services/containerattached/resource_container_attached_cluster_update_test.go b/mmv1/third_party/terraform/services/containerattached/resource_container_attached_cluster_update_test.go index e4de49f585d9..7c199b496517 100644 --- a/mmv1/third_party/terraform/services/containerattached/resource_container_attached_cluster_update_test.go +++ b/mmv1/third_party/terraform/services/containerattached/resource_container_attached_cluster_update_test.go @@ -118,6 +118,9 @@ resource "google_container_attached_cluster" "primary" { namespace = "default" } } + security_posture_config { + vulnerability_mode = "VULNERABILITY_ENTERPRISE" + } } `, context) } @@ -166,6 +169,9 @@ resource "google_container_attached_cluster" "primary" { namespace = "custom-ns" } } + security_posture_config { + vulnerability_mode = "VULNERABILITY_DISABLED" + } lifecycle { prevent_destroy = true } @@ -312,6 +318,9 @@ resource "google_container_attached_cluster" "primary" { namespace = "custom-ns" } } + security_posture_config { + vulnerability_mode = "VULNERABILITY_DISABLED" + } } `, context) }