diff --git a/.changelog/11883.txt b/.changelog/11883.txt new file mode 100644 index 00000000000..3b6b8c183a6 --- /dev/null +++ b/.changelog/11883.txt
@@ -0,0 +1,6 @@ +```release-note:enhancement +containeraws: added `kubelet_config` field group to the `google_container_aws_node_pool` resource +``` +```release-note:enhancement +assuredworkloads: added `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS` and `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT` enum values to `compliance_regime` in the `google_assuredworkload_workload` resource +``` \ No newline at end of file

diff --git a/go.mod b/go.mod index 57b44d578a1..6f4adfafb70 100644 --- a/go.mod +++ b/go.mod
@@ -4,7 +4,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.30.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.74.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1

diff --git a/go.sum b/go.sum index 8213bad59ed..8502771a073 100644 --- a/go.sum +++ b/go.sum
@@ -440,3 +440,7 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0 h1:mVsrkdw7rJbmay3EE/KjHx7WbQcrfwLmxmzCFDXIl90= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.73.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.74.0 h1:YV3eTXgDw3Zp8Mc12WE2Aa3+22twNd07xkFkEODrlOQ= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.74.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k=

diff --git a/google/services/assuredworkloads/resource_assured_workloads_workload.go b/google/services/assuredworkloads/resource_assured_workloads_workload.go index ba67826adc2..545169405b4 100644 --- a/google/services/assuredworkloads/resource_assured_workloads_workload.go +++ b/google/services/assuredworkloads/resource_assured_workloads_workload.go
@@ -60,7 +60,7 @@ func ResourceAssuredWorkloadsWorkload() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS", + Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT", }, "display_name": {

diff --git a/google/services/containeraws/resource_container_aws_node_pool.go b/google/services/containeraws/resource_container_aws_node_pool.go index 40e58fd2e73..1203d3a4ff1 100644 --- a/google/services/containeraws/resource_container_aws_node_pool.go +++ b/google/services/containeraws/resource_container_aws_node_pool.go
@@ -123,6 +123,16 @@ func ResourceContainerAwsNodePool() *schema.Resource { Description: "All of annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients and services.", }, + "kubelet_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The kubelet configuration for the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolKubeletConfigSchema(), + }, + "management": { Type: schema.TypeList, Computed: true,
@@ -446,6 +456,42 @@ func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource { } } +func ContainerAwsNodePoolKubeletConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_cfs_quota": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Whether or not to enable CPU CFS quota. Defaults to true.", + }, + + "cpu_cfs_quota_period": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The CPU CFS quota period to use for the node. Defaults to \"100ms\".", + }, + + "cpu_manager_policy": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "The CpuManagerPolicy to use for the node. Defaults to \"none\".", + }, + + "pod_pids_limit": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset.", + }, + }, + } +} + func ContainerAwsNodePoolManagementSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{
@@ -511,6 +557,7 @@ func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{} SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -570,6 +617,7 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -624,6 +672,9 @@ func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) if err = d.Set("effective_annotations", res.Annotations); err != nil { return fmt.Errorf("error setting effective_annotations in state: %s", err) } + if err = d.Set("kubelet_config", flattenContainerAwsNodePoolKubeletConfig(res.KubeletConfig)); err != nil { + return fmt.Errorf("error setting kubelet_config in state: %s", err) + } if err = d.Set("management", tpgresource.FlattenContainerAwsNodePoolManagement(res.Management, d, config)); err != nil { return fmt.Errorf("error setting management in state: %s", err) }
@@ -674,6 +725,7 @@ func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{} SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -728,6 +780,7 @@ func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{} SubnetId: dcl.String(d.Get("subnet_id").(string)), Version: dcl.String(d.Get("version").(string)), Annotations: tpgresource.CheckStringMap(d.Get("effective_annotations")), + KubeletConfig: expandContainerAwsNodePoolKubeletConfig(d.Get("kubelet_config")), Management: expandContainerAwsNodePoolManagement(d.Get("management")), Project: dcl.String(project), UpdateSettings: expandContainerAwsNodePoolUpdateSettings(d.Get("update_settings")),
@@ -1080,6 +1133,38 @@ func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxP } +func expandContainerAwsNodePoolKubeletConfig(o interface{}) *containeraws.NodePoolKubeletConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolKubeletConfig{ + CpuCfsQuota: dcl.Bool(obj["cpu_cfs_quota"].(bool)), + CpuCfsQuotaPeriod: dcl.String(obj["cpu_cfs_quota_period"].(string)), + CpuManagerPolicy: containeraws.NodePoolKubeletConfigCpuManagerPolicyEnumRef(obj["cpu_manager_policy"].(string)), + PodPidsLimit: dcl.Int64(int64(obj["pod_pids_limit"].(int))), + } +} + +func flattenContainerAwsNodePoolKubeletConfig(obj *containeraws.NodePoolKubeletConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cpu_cfs_quota": obj.CpuCfsQuota, + "cpu_cfs_quota_period": obj.CpuCfsQuotaPeriod, + "cpu_manager_policy": obj.CpuManagerPolicy, + "pod_pids_limit": obj.PodPidsLimit, + } + + return []interface{}{transformed} + +} + func expandContainerAwsNodePoolManagement(o interface{}) *containeraws.NodePoolManagement { if o == nil { return nil

diff --git a/google/services/containeraws/resource_container_aws_node_pool_generated_test.go b/google/services/containeraws/resource_container_aws_node_pool_generated_test.go index ee335f12454..2741be8bdc8 100644 --- a/google/services/containeraws/resource_container_aws_node_pool_generated_test.go +++ b/google/services/containeraws/resource_container_aws_node_pool_generated_test.go
@@ -276,10 +276,16 @@ resource "google_container_aws_node_pool" "primary" { auto_repair = true } + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + project = "%{project_name}" } - `, context) }
@@ -436,10 +442,16 @@ resource "google_container_aws_node_pool" "primary" { auto_repair = false } + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + project = "%{project_name}" } - `, context) }

diff --git a/website/docs/r/assured_workloads_workload.html.markdown b/website/docs/r/assured_workloads_workload.html.markdown index 0e5e71bf9a7..a954c56de52 100644 --- a/website/docs/r/assured_workloads_workload.html.markdown +++ b/website/docs/r/assured_workloads_workload.html.markdown
@@ -146,7 +146,7 @@ The following arguments are supported: * `compliance_regime` - (Required) - Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS + Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS, HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT * `display_name` - (Required)

diff --git a/website/docs/r/container_aws_node_pool.html.markdown b/website/docs/r/container_aws_node_pool.html.markdown index ece2f7fbe8f..5932b836e11 100644 --- a/website/docs/r/container_aws_node_pool.html.markdown +++ b/website/docs/r/container_aws_node_pool.html.markdown
@@ -179,10 +179,16 @@ resource "google_container_aws_node_pool" "primary" { auto_repair = true } + kubelet_config { + cpu_manager_policy = "none" + cpu_cfs_quota = true + cpu_cfs_quota_period = "100ms" + pod_pids_limit = 1024 + } + project = "my-project-name" } - ``` ## Example Usage - basic_enum_aws_cluster A basic example of a containeraws node pool with lowercase enums
@@ -633,6 +639,10 @@ The `max_pods_constraint` block supports: **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field `effective_annotations` for all of the annotations present on the resource. +* `kubelet_config` - + (Optional) + The kubelet configuration for the node pool. + * `management` - (Optional) The Management configuration for this node pool.
@@ -721,6 +731,24 @@ The `taints` block supports: (Required) Value for the taint. +The `kubelet_config` block supports: + +* `cpu_cfs_quota` - + (Optional) + Whether or not to enable CPU CFS quota. Defaults to true. + +* `cpu_cfs_quota_period` - + (Optional) + Optional. The CPU CFS quota period to use for the node. Defaults to "100ms". + +* `cpu_manager_policy` - + (Optional) + The CpuManagerPolicy to use for the node. Defaults to "none". + +* `pod_pids_limit` - + (Optional) + Optional. The maximum number of PIDs in each pod running on the node. The limit scales automatically based on underlying machine size if left unset. + The `management` block supports: * `auto_repair` -
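
For reviewers who want to see the new surface end to end, here is a minimal HCL sketch of a node pool that exercises the `kubelet_config` block added by this change. The `kubelet_config` values mirror the generated test and docs example above; everything else (resource names, cluster reference, region, subnet, version, instance profile, KMS key) is a placeholder and is not prescribed by this diff.

```hcl
# Hypothetical configuration; only the kubelet_config block comes from this change.
resource "google_container_aws_node_pool" "example" {
  name      = "example-node-pool"
  cluster   = google_container_aws_cluster.example.name
  location  = "us-west1"
  subnet_id = "subnet-placeholder"
  version   = "1.27.0-gke.0" # placeholder GKE-on-AWS version

  autoscaling {
    min_node_count = 1
    max_node_count = 3
  }

  max_pods_constraint {
    max_pods_per_node = 110
  }

  config {
    instance_type        = "t3.medium"
    iam_instance_profile = "example-instance-profile" # placeholder

    config_encryption {
      kms_key_arn = "arn:aws:kms:us-west-2:000000000000:key/placeholder"
    }
  }

  # New in this change: kubelet tuning for the node pool. Values mirror the
  # generated test and docs example added by the diff.
  kubelet_config {
    cpu_manager_policy   = "none"
    cpu_cfs_quota        = true
    cpu_cfs_quota_period = "100ms"
    pod_pids_limit       = 1024
  }
}
```

Note that all four `kubelet_config` attributes are declared `ForceNew` in the schema, so changing any of them replaces the node pool. The other half of this change needs no new block: the added `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS` and `HEALTHCARE_AND_LIFE_SCIENCES_CONTROLS_WITH_US_SUPPORT` values are simply accepted by the existing `compliance_regime` argument of `google_assuredworkload_workload`.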