From 5259cce5c4a8284508cb21eb40e6ce76f58deca0 Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 31 Dec 2024 16:04:46 +0000 Subject: [PATCH] chore(schema): update --- samtranslator/schema/schema.json | 540 +++---- schema_source/cloudformation-docs.json | 1761 +++++++++++++++++----- schema_source/cloudformation.schema.json | 540 +++---- 3 files changed, 1856 insertions(+), 985 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 71a3f4f02..2c0ce07d9 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -27447,13 +27447,9 @@ "additionalProperties": false, "properties": { "AccessPointId": { - "markdownDescription": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", - "title": "AccessPointId", "type": "string" }, "Iam": { - "markdownDescription": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified.", - "title": "Iam", "type": "string" } }, @@ -27499,8 +27495,6 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.\n\n> This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.", - "title": "InstanceType", "type": "string" }, "JobRoleArn": { @@ -27711,28 +27705,18 @@ "additionalProperties": false, "properties": { "AuthorizationConfig": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig", - "markdownDescription": "The authorization configuration details for the Amazon EFS file system.", - "title": "AuthorizationConfig" + "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig" }, "FileSystemId": { - "markdownDescription": "The Amazon EFS file system ID to use.", - "title": "FileSystemId", "type": "string" }, "RootDirectory": { - "markdownDescription": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. 
The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", - "title": "RootDirectory", "type": "string" }, "TransitEncryption": { - "markdownDescription": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", - "title": "TransitEncryption", "type": "string" }, "TransitEncryptionPort": { - "markdownDescription": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* .", - "title": "TransitEncryptionPort", "type": "number" } }, @@ -28142,8 +28126,6 @@ "additionalProperties": false, "properties": { "Labels": { - "markdownDescription": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object.", - "title": "Labels", "type": "object" } }, @@ -28153,18 +28135,12 @@ "additionalProperties": false, "properties": { "ContainerPath": { - "markdownDescription": "The path on the container where the host volume is mounted.", - "title": "ContainerPath", "type": "string" }, "ReadOnly": { - "markdownDescription": "If this value is `true` , the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is `false` .", - "title": "ReadOnly", "type": "boolean" }, "SourceVolume": { - "markdownDescription": "The name of the volume to mount.", - "title": "SourceVolume", "type": "string" } }, @@ -28249,57 +28225,39 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.\n\n> This object is limited to 10 elements.", - "title": "Containers", "type": "array" }, "DnsPolicy": { - "markdownDescription": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", - "title": "DnsPolicy", "type": "string" }, "HostNetwork": { - "markdownDescription": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . 
Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", - "title": "HostNetwork", "type": "boolean" }, "ImagePullSecrets": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ImagePullSecret" }, - "markdownDescription": "", - "title": "ImagePullSecrets", "type": "array" }, "InitContainers": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements.", - "title": "InitContainers", "type": "array" }, "Metadata": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata", - "markdownDescription": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", - "title": "Metadata" + "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata" }, "ServiceAccountName": { - "markdownDescription": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", - "title": "ServiceAccountName", "type": "string" }, "ShareProcessNamespace": { - "markdownDescription": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", - "title": "ShareProcessNamespace", "type": "boolean" }, "Volumes": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" }, - "markdownDescription": "Specifies the volumes for a job definition that uses Amazon EKS resources.", - "title": "Volumes", "type": "array" } }, @@ -28524,8 +28482,6 @@ "additionalProperties": false, "properties": { "AttemptDurationSeconds": { - "markdownDescription": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. 
The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes.", - "title": "AttemptDurationSeconds", "type": "number" } }, @@ -28589,18 +28545,12 @@ "additionalProperties": false, "properties": { "EfsVolumeConfiguration": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration", - "markdownDescription": "This is used when you're using an Amazon Elastic File System file system for job storage. For more information, see [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the *AWS Batch User Guide* .", - "title": "EfsVolumeConfiguration" + "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration" }, "Host": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost", - "markdownDescription": "The contents of the `host` parameter determine whether your data volume persists on the host container instance and where it's stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", - "title": "Host" + "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost" }, "Name": { - "markdownDescription": "The name of the volume. It can be up to 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This name is referenced in the `sourceVolume` parameter of container definition `mountPoints` .", - "title": "Name", "type": "string" } }, @@ -28610,8 +28560,6 @@ "additionalProperties": false, "properties": { "SourcePath": { - "markdownDescription": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. Don't provide this for these jobs.", - "title": "SourcePath", "type": "string" } }, @@ -29770,7 +29718,7 @@ "type": "string" }, "Type": { - "markdownDescription": "Configure guardrail type when the PII entity is detected.\n\nThe following PIIs are used to block or mask sensitive information:\n\n- *General*\n\n- *ADDRESS*\n\nA physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.\n- *AGE*\n\nAn individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guarrails recognizes \"40 years\" as an age.\n- *NAME*\n\nAn individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. guardrails doesn't apply this entity type to names that are part of organizations or addresses. 
For example, guardrails recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n- *EMAIL*\n\nAn email address, such as *marymajor@email.com* .\n- *PHONE*\n\nA phone number. This entity type also includes fax and pager numbers.\n- *USERNAME*\n\nA user name that identifies an account, such as a login name, screen name, nick name, or handle.\n- *PASSWORD*\n\nAn alphanumeric string that is used as a password, such as \"* *very20special#pass** \".\n- *DRIVER_ID*\n\nThe number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.\n- *LICENSE_PLATE*\n\nA license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.\n- *VEHICLE_IDENTIFICATION_NUMBER*\n\nA Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the *ISO 3779* specification. Each country has specific codes and formats for VINs.\n- *Finance*\n\n- *REDIT_DEBIT_CARD_CVV*\n\nA three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.\n- *CREDIT_DEBIT_CARD_EXPIRY*\n\nThe expiration date for a credit or debit card. This number is usually four digits long and is often formatted as *month/year* or *MM/YY* . Guardrails recognizes expiration dates such as *01/21* , *01/2021* , and *Jan 2021* .\n- *CREDIT_DEBIT_CARD_NUMBER*\n\nThe number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.\n- *PIN*\n\nA four-digit personal identification number (PIN) with which you can access your bank account.\n- *INTERNATIONAL_BANK_ACCOUNT_NUMBER*\n\nAn International Bank Account Number has specific formats in each country. For more information, see [www.iban.com/structure](https://docs.aws.amazon.com/https://www.iban.com/structure) .\n- *SWIFT_CODE*\n\nA SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.\n\nSWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.\n- *IT*\n\n- *IP_ADDRESS*\n\nAn IPv4 address, such as *198.51.100.0* .\n- *MAC_ADDRESS*\n\nA *media access control* (MAC) address is a unique identifier assigned to a network interface controller (NIC).\n- *URL*\n\nA web address, such as *www.example.com* .\n- *AWS_ACCESS_KEY*\n\nA unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *AWS_SECRET_KEY*\n\nA unique identifier that's associated with an access key. 
You use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *USA specific*\n\n- *US_BANK_ACCOUNT_NUMBER*\n\nA US bank account number, which is typically 10 to 12 digits long.\n- *US_BANK_ROUTING_NUMBER*\n\nA US bank account routing number. These are typically nine digits long,\n- *US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER*\n\nA US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits.\n- *US_PASSPORT_NUMBER*\n\nA US passport number. Passport numbers range from six to nine alphanumeric characters.\n- *US_SOCIAL_SECURITY_NUMBER*\n\nA US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.\n- *Canada specific*\n\n- *CA_HEALTH_NUMBER*\n\nA Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.\n- *CA_SOCIAL_INSURANCE_NUMBER*\n\nA Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.\n\nThe SIN is formatted as three groups of three digits, such as *123-456-789* . A SIN can be validated through a simple check-digit process called the [Luhn algorithm](https://docs.aws.amazon.com/https://www.wikipedia.org/wiki/Luhn_algorithm) .\n- *UK Specific*\n\n- *UK_NATIONAL_HEALTH_SERVICE_NUMBER*\n\nA UK National Health Service Number is a 10-17 digit number, such as *485 777 3456* . The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.\n- *UK_NATIONAL_INSURANCE_NUMBER*\n\nA UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.\n\nThe number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits.\n- *UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER*\n\nA UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.\n- *Custom*\n\n- *Regex filter* - You can use a regular expressions to define patterns for a guardrail to recognize and act upon such as serial number, booking ID etc..", + "markdownDescription": "Configure guardrail type when the PII entity is detected.\n\nThe following PIIs are used to block or mask sensitive information:\n\n- *General*\n\n- *ADDRESS*\n\nA physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.\n- *AGE*\n\nAn individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guardrails recognizes \"40 years\" as an age.\n- *NAME*\n\nAn individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. guardrails doesn't apply this entity type to names that are part of organizations or addresses. 
For example, guardrails recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n- *EMAIL*\n\nAn email address, such as *marymajor@email.com* .\n- *PHONE*\n\nA phone number. This entity type also includes fax and pager numbers.\n- *USERNAME*\n\nA user name that identifies an account, such as a login name, screen name, nick name, or handle.\n- *PASSWORD*\n\nAn alphanumeric string that is used as a password, such as \"* *very20special#pass** \".\n- *DRIVER_ID*\n\nThe number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.\n- *LICENSE_PLATE*\n\nA license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.\n- *VEHICLE_IDENTIFICATION_NUMBER*\n\nA Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the *ISO 3779* specification. Each country has specific codes and formats for VINs.\n- *Finance*\n\n- *CREDIT_DEBIT_CARD_CVV*\n\nA three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.\n- *CREDIT_DEBIT_CARD_EXPIRY*\n\nThe expiration date for a credit or debit card. This number is usually four digits long and is often formatted as *month/year* or *MM/YY* . Guardrails recognizes expiration dates such as *01/21* , *01/2021* , and *Jan 2021* .\n- *CREDIT_DEBIT_CARD_NUMBER*\n\nThe number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.\n- *PIN*\n\nA four-digit personal identification number (PIN) with which you can access your bank account.\n- *INTERNATIONAL_BANK_ACCOUNT_NUMBER*\n\nAn International Bank Account Number has specific formats in each country. For more information, see [www.iban.com/structure](https://docs.aws.amazon.com/https://www.iban.com/structure) .\n- *SWIFT_CODE*\n\nA SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.\n\nSWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.\n- *IT*\n\n- *IP_ADDRESS*\n\nAn IPv4 address, such as *198.51.100.0* .\n- *MAC_ADDRESS*\n\nA *media access control* (MAC) address is a unique identifier assigned to a network interface controller (NIC).\n- *URL*\n\nA web address, such as *www.example.com* .\n- *AWS_ACCESS_KEY*\n\nA unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *AWS_SECRET_KEY*\n\nA unique identifier that's associated with an access key. 
You use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *USA specific*\n\n- *US_BANK_ACCOUNT_NUMBER*\n\nA US bank account number, which is typically 10 to 12 digits long.\n- *US_BANK_ROUTING_NUMBER*\n\nA US bank account routing number. These are typically nine digits long,\n- *US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER*\n\nA US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits.\n- *US_PASSPORT_NUMBER*\n\nA US passport number. Passport numbers range from six to nine alphanumeric characters.\n- *US_SOCIAL_SECURITY_NUMBER*\n\nA US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.\n- *Canada specific*\n\n- *CA_HEALTH_NUMBER*\n\nA Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.\n- *CA_SOCIAL_INSURANCE_NUMBER*\n\nA Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.\n\nThe SIN is formatted as three groups of three digits, such as *123-456-789* . A SIN can be validated through a simple check-digit process called the [Luhn algorithm](https://docs.aws.amazon.com/https://www.wikipedia.org/wiki/Luhn_algorithm) .\n- *UK Specific*\n\n- *UK_NATIONAL_HEALTH_SERVICE_NUMBER*\n\nA UK National Health Service Number is a 10-17 digit number, such as *485 777 3456* . The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.\n- *UK_NATIONAL_INSURANCE_NUMBER*\n\nA UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.\n\nThe number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits.\n- *UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER*\n\nA UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.\n- *Custom*\n\n- *Regex filter* - You can use a regular expressions to define patterns for a guardrail to recognize and act upon such as serial number, booking ID etc..", "title": "Type", "type": "string" } @@ -33254,7 +33202,7 @@ }, "TableReference": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.TableReference", - "markdownDescription": "The AWS Glue table that this configured table represents.", + "markdownDescription": "The table that this configured table represents.", "title": "TableReference" }, "Tags": { @@ -33928,7 +33876,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.", "title": "Tags", "type": "array" } @@ -34502,7 +34450,7 @@ "type": "string" }, "ConfigurationAlias": { - "markdownDescription": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. 
Hook types currently support default configuration alias.", + "markdownDescription": "An alias by which to refer to this configuration data.\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", "title": "ConfigurationAlias", "type": "string" }, @@ -34589,7 +34537,7 @@ "title": "LoggingConfig" }, "SchemaHandlerPackage": { - "markdownDescription": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That's, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package, see [Modeling custom CloudFormation Hooks](https://docs.aws.amazon.com/cloudformation-cli/latest/hooks-userguide/hooks-model.html) in the *AWS CloudFormation Hooks User Guide* .\n\n> To register the Hook, you must have `s3:GetObject` permissions to access the S3 objects.", "title": "SchemaHandlerPackage", "type": "string" }, @@ -34683,17 +34631,17 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The Amazon Resource Name (ARN) of the underlying AWS Lambda function that you want AWS CloudFormation to invoke when the macro is run.", + "markdownDescription": "The Amazon Resource Name (ARN) of the underlying Lambda function that you want CloudFormation to invoke when the macro is run.", "title": "FunctionName", "type": "string" }, "LogGroupName": { - "markdownDescription": "The CloudWatch Logs group to which AWS CloudFormation sends error logging information when invoking the macro's underlying AWS Lambda function.", + "markdownDescription": "The CloudWatch Logs group to which CloudFormation sends error logging information when invoking the macro's underlying Lambda function.", "title": "LogGroupName", "type": "string" }, "LogRoleARN": { - "markdownDescription": "The ARN of the role AWS CloudFormation should assume when sending log entries to CloudWatch Logs .", + "markdownDescription": "The ARN of the role CloudFormation should assume when sending log entries to CloudWatch Logs .", "title": "LogRoleARN", "type": "string" }, @@ -34844,7 +34792,7 @@ "type": "string" }, "ModulePackage": { - "markdownDescription": "A URL to the S3 bucket containing the package that contains the template fragment and schema files for the module version to register.\n\n> The user registering the module version must be able to access the module package in the S3 bucket. That's, the user needs to have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the package. 
For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the S3 bucket containing the package that contains the template fragment and schema files for the module version to register.\n\nFor more information, see [Module structure and requirements](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/modules-structure.html) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .\n\n> To register the module version, you must have `s3:GetObject` permissions to access the S3 objects.", "title": "ModulePackage", "type": "string" } @@ -34917,7 +34865,7 @@ "type": "string" }, "LogDeliveryBucket": { - "markdownDescription": "The S3 bucket to which CloudFormation delivers the contract test execution logs.\n\nCloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of `PASSED` or `FAILED` .\n\nThe user initiating the stack operation must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:\n\n- GetObject\n- PutObject\n\nFor more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "The S3 bucket to which CloudFormation delivers the contract test execution logs.\n\nCloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of `PASSED` or `FAILED` .\n\nThe user initiating the stack operation must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:\n\n- s3:GetObject\n- s3:PutObject", "title": "LogDeliveryBucket", "type": "string" }, @@ -35140,7 +35088,7 @@ "additionalProperties": false, "properties": { "ExecutionRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource. If your resource calls AWS APIs in any of its handlers, you must create an *[IAM execution role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)* that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource. If your resource calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. 
When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.", "title": "ExecutionRoleArn", "type": "string" }, @@ -35150,7 +35098,7 @@ "title": "LoggingConfig" }, "SchemaHandlerPackage": { - "markdownDescription": "A URL to the S3 bucket containing the resource project package that contains the necessary files for the resource you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That is, the user needs to have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the S3 bucket containing the resource project package that contains the necessary files for the resource you want to register.\n\nFor information on generating a schema handler package, see [Modeling resource types to use with AWS CloudFormation](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-model.html) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .\n\n> To register the resource version, you must have `s3:GetObject` permissions to access the S3 objects.", "title": "SchemaHandlerPackage", "type": "string" }, @@ -35266,7 +35214,7 @@ "type": "array" }, "TemplateURL": { - "markdownDescription": "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see [Template anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) .\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", + "markdownDescription": "The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket. The location for an Amazon S3 bucket must start with `https://` .\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", "title": "TemplateURL", "type": "string" }, @@ -35338,7 +35286,7 @@ "additionalProperties": false, "properties": { "AdministrationRoleARN": { - "markdownDescription": "The Amazon Resource Number (ARN) of the IAM role to use to create this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account.\n\nUse customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. 
For more information, see [Prerequisites: Granting Permissions for Stack Set Operations](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs.html) in the *AWS CloudFormation User Guide* .\n\n*Minimum* : `20`\n\n*Maximum* : `2048`", + "markdownDescription": "The Amazon Resource Number (ARN) of the IAM role to use to create this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account.\n\nUse customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see [Grant self-managed permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) in the *AWS CloudFormation User Guide* .\n\n*Minimum* : `20`\n\n*Maximum* : `2048`", "title": "AdministrationRoleARN", "type": "string" }, @@ -35356,7 +35304,7 @@ "items": { "type": "string" }, - "markdownDescription": "The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account \u2014for example, by creating new AWS Identity and Access Management ( IAM ) users. For more information, see [Acknowledging IAM Resources in AWS CloudFormation Templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities) .", + "markdownDescription": "The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account \u2014for example, by creating new IAM users. For more information, see [Acknowledging IAM resources in CloudFormation templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/control-access-with-iam.html#using-iam-capabilities) in the *AWS CloudFormation User Guide* .", "title": "Capabilities", "type": "array" }, @@ -35366,7 +35314,7 @@ "type": "string" }, "ExecutionRoleName": { - "markdownDescription": "The name of the IAM execution role to use to create the stack set. If you don't specify an execution role, AWS CloudFormation uses the `AWSCloudFormationStackSetExecutionRole` role for the stack set operation.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `[a-zA-Z_0-9+=,.@-]+`", + "markdownDescription": "The name of the IAM execution role to use to create the stack set. If you don't specify an execution role, CloudFormation uses the `AWSCloudFormationStackSetExecutionRole` role for the stack set operation.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `[a-zA-Z_0-9+=,.@-]+`", "title": "ExecutionRoleName", "type": "string" }, @@ -35377,7 +35325,7 @@ }, "OperationPreferences": { "$ref": "#/definitions/AWS::CloudFormation::StackSet.OperationPreferences", - "markdownDescription": "The user-specified preferences for how AWS CloudFormation performs a stack set operation.", + "markdownDescription": "The user-specified preferences for how CloudFormation performs a stack set operation.", "title": "OperationPreferences" }, "Parameters": { @@ -35389,7 +35337,7 @@ "type": "array" }, "PermissionModel": { - "markdownDescription": "Describes how the IAM roles required for stack set operations are created.\n\n- With `SELF_MANAGED` permissions, you must create the administrator and execution roles required to deploy to target accounts. 
For more information, see [Grant Self-Managed Stack Set Permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) .\n- With `SERVICE_MANAGED` permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations .", + "markdownDescription": "Describes how the IAM roles required for stack set operations are created.\n\n- With `SELF_MANAGED` permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see [Grant self-managed permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) in the *AWS CloudFormation User Guide* .\n- With `SERVICE_MANAGED` permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations . For more information, see [Activate trusted access for stack sets with AWS Organizations](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-orgs-activate-trusted-access.html) in the *AWS CloudFormation User Guide* .", "title": "PermissionModel", "type": "string" }, @@ -35420,7 +35368,7 @@ "type": "string" }, "TemplateURL": { - "markdownDescription": "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to [Template Anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) in the AWS CloudFormation User Guide.\n\nConditional: You must specify only one of the following parameters: `TemplateBody` , `TemplateURL` .", + "markdownDescription": "The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with `https://` .\n\nConditional: You must specify only one of the following parameters: `TemplateBody` , `TemplateURL` .", "title": "TemplateURL", "type": "string" } @@ -35480,12 +35428,12 @@ "items": { "type": "string" }, - "markdownDescription": "The names of one or more AWS accounts for which you want to deploy stack set updates.\n\n*Pattern* : `^[0-9]{12}$`", + "markdownDescription": "The account IDs of the AWS accounts . If you have many account numbers, you can provide those accounts using the `AccountsUrl` property instead.\n\n*Pattern* : `^[0-9]{12}$`", "title": "Accounts", "type": "array" }, "AccountsUrl": { - "markdownDescription": "Returns the value of the `AccountsUrl` property.", + "markdownDescription": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. 
There is currently a 10MB limit for the data (approximately 800,000 accounts).", "title": "AccountsUrl", "type": "string" }, @@ -35493,7 +35441,7 @@ "items": { "type": "string" }, - "markdownDescription": "The organization root ID or organizational unit (OU) IDs to which StackSets deploys.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`", + "markdownDescription": "The organization root ID or organizational unit (OU) IDs.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`", "title": "OrganizationalUnitIds", "type": "array" } @@ -35515,12 +35463,12 @@ "additionalProperties": false, "properties": { "FailureToleranceCount": { - "markdownDescription": "The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` (but not both).", + "markdownDescription": "The number of accounts, per Region, for which this operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` (but not both).", "title": "FailureToleranceCount", "type": "number" }, "FailureTolerancePercentage": { - "markdownDescription": "The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nWhen calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds *down* to the next whole number.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` , but not both.", + "markdownDescription": "The percentage of accounts, per Region, for which this stack operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nWhen calculating the number of accounts based on the specified percentage, CloudFormation rounds *down* to the next whole number.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` , but not both.", "title": "FailureTolerancePercentage", "type": "number" }, @@ -35530,7 +35478,7 @@ "type": "number" }, "MaxConcurrentPercentage": { - "markdownDescription": "The maximum percentage of accounts in which to perform this operation at one time.\n\nWhen calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.\n\nNote that this setting lets you specify the *maximum* for operations. 
For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n\nConditional: You must specify either `MaxConcurrentCount` or `MaxConcurrentPercentage` , but not both.", + "markdownDescription": "The maximum percentage of accounts in which to perform this operation at one time.\n\nWhen calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.\n\nNote that this setting lets you specify the *maximum* for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n\nConditional: You must specify either `MaxConcurrentCount` or `MaxConcurrentPercentage` , but not both.", "title": "MaxConcurrentPercentage", "type": "number" }, @@ -35554,7 +35502,7 @@ "additionalProperties": false, "properties": { "ParameterKey": { - "markdownDescription": "The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that's specified in your template.", + "markdownDescription": "The key associated with the parameter. If you don't specify a key and value for a particular parameter, CloudFormation uses the default value that's specified in your template.", "title": "ParameterKey", "type": "string" }, @@ -35769,7 +35717,7 @@ "type": "number" }, "Handle": { - "markdownDescription": "A reference to the wait condition handle used to signal this wait condition. Use the `Ref` intrinsic function to specify an [`AWS::CloudFormation::WaitConditionHandle`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitconditionhandle.html) resource.\n\nAnytime you add a `WaitCondition` resource during a stack update, you must associate the wait condition with a new WaitConditionHandle resource. Don't reuse an old wait condition handle that has already been defined in the template. If you reuse a wait condition handle, the wait condition might evaluate old signals from a previous create or update stack command.\n\nUpdates aren't supported.", + "markdownDescription": "A reference to the wait condition handle used to signal this wait condition. Use the `Ref` intrinsic function to specify an [AWS::CloudFormation::WaitConditionHandle](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-waitconditionhandle.html) resource.\n\nAnytime you add a `WaitCondition` resource during a stack update, you must associate the wait condition with a new WaitConditionHandle resource. Don't reuse an old wait condition handle that has already been defined in the template. If you reuse a wait condition handle, the wait condition might evaluate old signals from a previous create or update stack command.\n\nUpdates aren't supported.", "title": "Handle", "type": "string" }, @@ -36653,7 +36601,7 @@ "type": "number" }, "OriginKeepaliveTimeout": { - "markdownDescription": "Specifies how long, in seconds, CloudFront persists its connection to the origin. 
The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.\n\nFor more information, see [Origin Keep-alive Timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginKeepaliveTimeout) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.\n\nFor more information, see [Keep-alive timeout (custom origins only)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginKeepaliveTimeout) in the *Amazon CloudFront Developer Guide* .", "title": "OriginKeepaliveTimeout", "type": "number" }, @@ -36663,7 +36611,7 @@ "type": "string" }, "OriginReadTimeout": { - "markdownDescription": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the *origin response timeout* . The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.\n\nFor more information, see [Origin Response Timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the *origin response timeout* . The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.\n\nFor more information, see [Response timeout (custom origins only)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout) in the *Amazon CloudFront Developer Guide* .", "title": "OriginReadTimeout", "type": "number" }, @@ -36860,7 +36808,7 @@ "title": "DefaultCacheBehavior" }, "DefaultRootObject": { - "markdownDescription": "The object that you want CloudFront to request from your origin (for example, `index.html` ) when a viewer requests the root URL for your distribution ( `https://www.example.com` ) instead of an object in your distribution ( `https://www.example.com/product-description.html` ). Specifying a default root object avoids exposing the contents of your distribution.\n\nSpecify only the object name, for example, `index.html` . Don't add a `/` before the object name.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Creating a Default Root Object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "When a viewer requests the root URL for your distribution, the default root object is the object that you want CloudFront to request from your origin. 
For example, if your root URL is `https://www.example.com` , you can specify CloudFront to return the `index.html` file as the default root object. You can specify a default root object so that viewers see a specific file or object, instead of another object in your distribution (for example, `https://www.example.com/product-description.html` ). A default root object avoids exposing the contents of your distribution.\n\nYou can specify the object name or a path to the object name (for example, `index.html` or `exampleFolderName/index.html` ). Your string can't begin with a forward slash ( `/` ). Only specify the object name or the path to the object.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Specify a default root object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", "title": "DefaultRootObject", "type": "string" }, @@ -39289,7 +39237,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n> Selectors don't support the use of wildcards like `*` . To match multiple values with a single condition, you may use `StartsWith` , `EndsWith` , `NotStartsWith` , or `NotEndsWith` to explicitly match the beginning or end of the event field. \n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39612,7 +39560,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n> Selectors don't support the use of wildcards like `*` . To match multiple values with a single condition, you may use `StartsWith` , `EndsWith` , `NotStartsWith` , or `NotEndsWith` to explicitly match the beginning or end of the event field. \n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -44103,7 +44051,7 @@ "additionalProperties": false, "properties": { "Category": { - "markdownDescription": "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.\n\n- `Source`\n- `Build`\n- `Test`\n- `Deploy`\n- `Invoke`\n- `Approval`", + "markdownDescription": "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.\n\n- `Source`\n- `Build`\n- `Test`\n- `Deploy`\n- `Invoke`\n- `Approval`\n- `Compute`", "title": "Category", "type": "string" }, @@ -45734,12 +45682,12 @@ "title": "CloudWatchLogsConfiguration" }, "EventSource": { - "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", + "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about threat-protection user activity in user pools with the Plus feature plan, set to `userAuthEvents` .", "title": "EventSource", "type": "string" }, "LogLevel": { - "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", + "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. 
To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/exporting-quotas-and-usage.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from threat protection with the Plus feature plan, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .",
       "title": "LogLevel",
       "type": "string"
     }
@@ -45795,7 +45743,7 @@
       "items": {
         "type": "string"
       },
-      "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .",
+      "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* . For more information about alias attributes, see [Customizing sign-in attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases) .",
       "title": "AliasAttributes",
       "type": "array"
     },
@@ -45803,7 +45751,7 @@
       "items": {
         "type": "string"
       },
-      "markdownDescription": "The attributes to be auto-verified. Possible values: *email* , *phone_number* .",
+      "markdownDescription": "The attributes that you want your user pool to automatically verify. Possible values: *email* , *phone_number* . For more information, see [Verifying contact information at sign-up](https://docs.aws.amazon.com/cognito/latest/developerguide/signing-up-users-in-your-app.html#allowing-users-to-sign-up-and-confirm-themselves) .",
       "title": "AutoVerifiedAttributes",
       "type": "array"
     },
@@ -45814,7 +45762,7 @@
     },
     "DeviceConfiguration": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.DeviceConfiguration",
-      "markdownDescription": "The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.",
+      "markdownDescription": "The device-remembering configuration for a user pool. Device remembering or device tracking is a \"Remember me on this device\" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see [Working with user devices in your user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-device-tracking.html) . A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.",
       "title": "DeviceConfiguration"
     },
     "EmailConfiguration": {
@@ -45859,7 +45807,7 @@
       "items": {
         "$ref": "#/definitions/AWS::Cognito::UserPool.SchemaAttribute"
       },
-      "markdownDescription": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.",
+      "markdownDescription": "An array of attributes for the new user pool. You can add custom attributes and modify the properties of default attributes. The specifications in this parameter set the required attributes in your user pool. 
For more information, see [Working with user attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html) .",
       "title": "Schema",
       "type": "array"
     },
@@ -45870,7 +45818,7 @@
     },
     "SmsConfiguration": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.SmsConfiguration",
-      "markdownDescription": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account .",
+      "markdownDescription": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account . For more information, see [SMS message settings](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-sms-settings.html) .",
       "title": "SmsConfiguration"
     },
     "SmsVerificationMessage": {
@@ -45885,11 +45833,11 @@
     },
     "UserPoolAddOns": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.UserPoolAddOns",
-      "markdownDescription": "User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to `AUDIT` . To configure automatic security responses to risky traffic to your user pool, set to `ENFORCED` .\n\nFor more information, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) .",
+      "markdownDescription": "User pool add-ons. Contains settings for activation of threat protection. To log user security information but take no action, set to `AUDIT` . To configure automatic security responses to risky traffic to your user pool, set to `ENFORCED` .\n\nFor more information, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) . To activate this setting, your user pool must be on the [Plus tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-plus.html) .",
       "title": "UserPoolAddOns"
     },
     "UserPoolName": {
-      "markdownDescription": "A string used to name the user pool.",
+      "markdownDescription": "A friendly name for your user pool.",
       "title": "UserPoolName",
       "type": "string"
     },
@@ -45914,7 +45862,7 @@
     },
     "UsernameConfiguration": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.UsernameConfiguration",
-      "markdownDescription": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. 
For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) .", + "markdownDescription": "Sets the case sensitivity option for sign-in usernames. When `CaseSensitive` is `false` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `false` as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nWhen `CaseSensitive` is `true` (case sensitive), Amazon Cognito interprets `USERNAME` and `UserName` as distinct users.\n\nThis configuration is immutable after you set it.", "title": "UsernameConfiguration" }, "VerificationMessageTemplate": { @@ -46461,13 +46409,13 @@ "items": { "type": "string" }, - "markdownDescription": "The allowed OAuth scopes. Possible values provided by OAuth are `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.", + "markdownDescription": "The OAuth 2.0 scopes that you want to permit your app client to authorize. Scopes govern access control to user pool self-service API operations, user data from the `userInfo` endpoint, and third-party APIs. Possible values provided by OAuth are `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.", "title": "AllowedOAuthScopes", "type": "array" }, "AnalyticsConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolClient.AnalyticsConfiguration", - "markdownDescription": "The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.\n\n> In AWS Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in AWS Region us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.", + "markdownDescription": "The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.\n\nIn AWS Regions where Amazon Pinpoint isn't available, user pools might not have access to analytics or might be configurable with campaigns in the US East (N. Virginia) Region. 
For more information, see [Using Amazon Pinpoint analytics](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-pinpoint-integration.html) .", "title": "AnalyticsConfiguration" }, "AuthSessionValidity": { @@ -46479,17 +46427,17 @@ "items": { "type": "string" }, - "markdownDescription": "A list of allowed redirect (callback) URLs for the IdPs.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", + "markdownDescription": "A list of allowed redirect (callback) URLs for the IdPs.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server. Amazon Cognito doesn't accept authorization requests with `redirect_uri` values that aren't in the list of `CallbackURLs` that you provide in this parameter.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", "title": "CallbackURLs", "type": "array" }, "ClientName": { - "markdownDescription": "The client name for the user pool client you would like to create.", + "markdownDescription": "A friendly name for the app client that you want to create.", "title": "ClientName", "type": "string" }, "DefaultRedirectURI": { - "markdownDescription": "The default redirect URI. In app clients with one assigned IdP, replaces `redirect_uri` in authentication requests. Must be in the `CallbackURLs` list.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nFor more information, see [Default redirect URI](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html#cognito-user-pools-app-idp-settings-about) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", + "markdownDescription": "The default redirect URI. In app clients with one assigned IdP, replaces `redirect_uri` in authentication requests. Must be in the `CallbackURLs` list.", "title": "DefaultRedirectURI", "type": "string" }, @@ -46507,12 +46455,12 @@ "items": { "type": "string" }, - "markdownDescription": "The authentication flows that you want your user pool client to support. For each app client in your user pool, you can sign in your users with any combination of one or more flows, including with a user name and Secure Remote Password (SRP), a user name and password, or a custom authentication process that you define with Lambda functions.\n\n> If you don't specify a value for `ExplicitAuthFlows` , your user client supports `ALLOW_REFRESH_TOKEN_AUTH` , `ALLOW_USER_SRP_AUTH` , and `ALLOW_CUSTOM_AUTH` . \n\nValid values include:\n\n- `ALLOW_USER_AUTH` : Enable selection-based sign-in with `USER_AUTH` . This setting covers username-password, secure remote password (SRP), passwordless, and passkey authentication. 
This authentiation flow can do username-password and SRP authentication without other `ExplicitAuthFlows` permitting them. For example users can complete an SRP challenge through `USER_AUTH` without the flow `USER_SRP_AUTH` being active for the app client. This flow doesn't include `CUSTOM_AUTH` .\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, your app passes a user name and password to Amazon Cognito in the request, instead of using the Secure Remote Password (SRP) protocol to securely transmit the password.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.\n\nIn some environments, you will see the values `ADMIN_NO_SRP_AUTH` , `CUSTOM_AUTH_FLOW_ONLY` , or `USER_PASSWORD_AUTH` . You can't assign these legacy `ExplicitAuthFlows` values to user pool clients at the same time as values that begin with `ALLOW_` ,\nlike `ALLOW_USER_SRP_AUTH` .",
+      "markdownDescription": "The authentication flows that you want your user pool client to support. For each app client in your user pool, you can sign in your users with any combination of one or more flows, including with a user name and Secure Remote Password (SRP), a user name and password, or a custom authentication process that you define with Lambda functions.\n\n> If you don't specify a value for `ExplicitAuthFlows` , your user client supports `ALLOW_REFRESH_TOKEN_AUTH` , `ALLOW_USER_SRP_AUTH` , and `ALLOW_CUSTOM_AUTH` . \n\nValid values include:\n\n- `ALLOW_USER_AUTH` : Enable selection-based sign-in with `USER_AUTH` . This setting covers username-password, secure remote password (SRP), passwordless, and passkey authentication. This authentication flow can do username-password and SRP authentication without other `ExplicitAuthFlows` permitting them. For example, users can complete an SRP challenge through `USER_AUTH` without the flow `USER_SRP_AUTH` being active for the app client. This flow doesn't include `CUSTOM_AUTH` .\n\nTo activate this setting, your user pool must be in the [Essentials tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-essentials.html) or higher.\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, your app passes a user name and password to Amazon Cognito in the request, instead of using the Secure Remote Password (SRP) protocol to securely transmit the password.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.\n\nIn some environments, you will see the values `ADMIN_NO_SRP_AUTH` , `CUSTOM_AUTH_FLOW_ONLY` , or `USER_PASSWORD_AUTH` . 
You can't assign these legacy `ExplicitAuthFlows` values to user pool clients at the same time as values that begin with `ALLOW_` ,\nlike `ALLOW_USER_SRP_AUTH` .", "title": "ExplicitAuthFlows", "type": "array" }, "GenerateSecret": { - "markdownDescription": "Boolean to specify whether you want to generate a secret for the user pool client being created.", + "markdownDescription": "When `true` , generates a client secret for the app client. Client secrets are used with server-side and machine-to-machine applications. For more information, see [App client types](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html#user-pool-settings-client-app-client-types) .", "title": "GenerateSecret", "type": "boolean" }, @@ -46525,7 +46473,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of allowed logout URLs for the IdPs.", + "markdownDescription": "A list of allowed logout URLs for managed login authentication. For more information, see [Logout endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/logout-endpoint.html) .", "title": "LogoutURLs", "type": "array" }, @@ -46551,17 +46499,17 @@ "items": { "type": "string" }, - "markdownDescription": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .\n\nThis setting applies to providers that you can access with the [hosted UI and OAuth 2.0 authorization server](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-app-integration.html) . The removal of `COGNITO` from this list doesn't prevent authentication operations for local users with the user pools API in an AWS SDK. The only way to prevent API-based authentication is to block access with a [AWS WAF rule](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-waf.html) .", + "markdownDescription": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .\n\nThis setting applies to providers that you can access with [managed login](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-managed-login.html) . The removal of `COGNITO` from this list doesn't prevent authentication operations for local users with the user pools API in an AWS SDK. The only way to prevent API-based authentication is to block access with a [AWS WAF rule](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-waf.html) .", "title": "SupportedIdentityProviders", "type": "array" }, "TokenValidityUnits": { "$ref": "#/definitions/AWS::Cognito::UserPoolClient.TokenValidityUnits", - "markdownDescription": "The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours.", + "markdownDescription": "The units that validity times are represented in. 
The default unit for refresh tokens is days, and the default for ID and access tokens is hours.",
       "title": "TokenValidityUnits"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool where you want to create a user pool client.",
+      "markdownDescription": "The ID of the user pool where you want to create an app client.",
       "title": "UserPoolId",
       "type": "string"
     },
@@ -46777,12 +46725,12 @@
    "additionalProperties": false,
    "properties": {
     "Description": {
-      "markdownDescription": "A string containing the description of the group.",
+      "markdownDescription": "A description of the group that you're creating.",
       "title": "Description",
       "type": "string"
     },
     "GroupName": {
-      "markdownDescription": "The name of the group. Must be unique.",
+      "markdownDescription": "A name for the group. This name must be unique in your user pool.",
       "title": "GroupName",
       "type": "string"
     },
       "type": "number"
     },
     "RoleArn": {
-      "markdownDescription": "The role Amazon Resource Name (ARN) for the group.",
+      "markdownDescription": "The Amazon Resource Name (ARN) for the IAM role that you want to associate with the group. A group role primarily declares a preferred role for the credentials that you get from an identity pool. Amazon Cognito ID tokens have a `cognito:preferred_role` claim that presents the highest-precedence group that a user belongs to. Both ID and access tokens also contain a `cognito:groups` claim that lists all the groups that a user is a member of.",
       "title": "RoleArn",
       "type": "string"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool.",
+      "markdownDescription": "The ID of the user pool where you want to create a user group.",
       "title": "UserPoolId",
       "type": "string"
     }
@@ -46864,7 +46812,7 @@
    "additionalProperties": false,
    "properties": {
     "AttributeMapping": {
-      "markdownDescription": "A mapping of IdP attributes to standard and custom user pool attributes.",
+      "markdownDescription": "A mapping of IdP attributes to standard and custom user pool attributes. Specify a user pool attribute as the key of the key-value pair, and the IdP attribute claim name as the value.",
       "title": "AttributeMapping",
       "type": "object"
     },
       "items": {
         "type": "string"
       },
-      "markdownDescription": "A list of IdP identifiers.",
+      "markdownDescription": "An array of IdP identifiers, for example `\"IdPIdentifiers\": [ \"MyIdP\", \"MyIdP2\" ]` . Identifiers are friendly names that you can pass in the `idp_identifier` query parameter of requests to the [Authorize endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/authorization-endpoint.html) to silently redirect to sign-in with the associated IdP. Identifiers in a domain format also enable the use of [email-address matching with SAML providers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-managing-saml-idp-naming.html) .",
       "title": "IdpIdentifiers",
       "type": "array"
     },
       "type": "object"
     },
     "ProviderName": {
-      "markdownDescription": "The IdP name.",
+      "markdownDescription": "The name that you want to assign to the IdP. 
You can pass the identity provider name in the `identity_provider` query parameter of requests to the [Authorize endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/authorization-endpoint.html) to silently redirect to sign-in with the associated IdP.",
       "title": "ProviderName",
       "type": "string"
     },
     "ProviderType": {
-      "markdownDescription": "The IdP type.",
+      "markdownDescription": "The type of IdP that you want to add. Amazon Cognito supports OIDC, SAML 2.0, Login With Amazon, Sign In With Apple, Google, and Facebook IdPs.",
       "title": "ProviderType",
       "type": "string"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID.",
+      "markdownDescription": "The ID of the user pool where you want to create an IdP.",
       "title": "UserPoolId",
       "type": "string"
     }
@@ -46979,7 +46927,7 @@
       "type": "array"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool.",
+      "markdownDescription": "The ID of the user pool where you want to create a resource server.",
       "title": "UserPoolId",
       "type": "string"
     }
@@ -47347,7 +47295,7 @@
       "type": "string"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool.",
+      "markdownDescription": "The ID of the user pool.",
       "title": "UserPoolId",
       "type": "string"
     }
@@ -47416,7 +47364,7 @@
    "properties": {
     "ClientMetadata": {
       "additionalProperties": true,
-      "markdownDescription": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing user pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:\n> \n> - Store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.\n> - Validate the ClientMetadata value.\n> - Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.",
+      "markdownDescription": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `ClientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. 
In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Using Lambda triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the `ClientMetadata` parameter, note that Amazon Cognito won't do the following:\n> \n> - Store the `ClientMetadata` value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the `ClientMetadata` parameter serves no purpose.\n> - Validate the `ClientMetadata` value.\n> - Encrypt the `ClientMetadata` value. Don't send sensitive information in this parameter.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -47429,17 +47377,17 @@ "items": { "type": "string" }, - "markdownDescription": "Specify `\"EMAIL\"` if email will be used to send the welcome message. Specify `\"SMS\"` if the phone number will be used. The default value is `\"SMS\"` . You can specify more than one value.", + "markdownDescription": "Specify `EMAIL` if email will be used to send the welcome message. Specify `SMS` if the phone number will be used. The default value is `SMS` . You can specify more than one value.", "title": "DesiredDeliveryMediums", "type": "array" }, "ForceAliasCreation": { - "markdownDescription": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", + "markdownDescription": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the `UserAttributes` parameter already exists as an alias with a different user, this request migrates the alias from the previous user to the newly-created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", "title": "ForceAliasCreation", "type": "boolean" }, "MessageAction": { - "markdownDescription": "Set to `RESEND` to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to `SUPPRESS` to suppress sending the message. You can specify only one value.", + "markdownDescription": "Set to `RESEND` to resend the invitation message to a user that already exists, and to reset the temporary-password duration with a new temporary password. Set to `SUPPRESS` to suppress sending the message. 
You can specify only one value.",
       "title": "MessageAction",
       "type": "string"
     },
       "type": "array"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool where the user will be created.",
+      "markdownDescription": "The ID of the user pool where you want to create a user.",
       "title": "UserPoolId",
       "type": "string"
     },
@@ -47553,7 +47501,7 @@
       "type": "string"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool.",
+      "markdownDescription": "The ID of the user pool that contains the group that you want to add the user to.",
       "title": "UserPoolId",
       "type": "string"
     },
@@ -51023,7 +50971,7 @@
       "type": "string"
     },
     "SourcePhoneNumberArn": {
-      "markdownDescription": "The claimed phone number ARN that was previously imported from the external service, such as Amazon Pinpoint. If it is from Amazon Pinpoint, it looks like the ARN of the phone number that was imported from Amazon Pinpoint.",
+      "markdownDescription": "The claimed phone number ARN that was previously imported from the external service, such as AWS End User Messaging. If it is from AWS End User Messaging, it looks like the ARN of the phone number that was imported from AWS End User Messaging.",
       "title": "SourcePhoneNumberArn",
       "type": "string"
     },
@@ -52883,12 +52831,12 @@
       "type": "string"
     },
     "FirstName": {
-      "markdownDescription": "The first name. This is required if you are using Amazon Connect or SAML for identity management.",
+      "markdownDescription": "The first name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) is not accepted.",
       "title": "FirstName",
       "type": "string"
     },
     "LastName": {
-      "markdownDescription": "The last name. This is required if you are using Amazon Connect or SAML for identity management.",
+      "markdownDescription": "The last name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) is not accepted.",
       "title": "LastName",
       "type": "string"
     },
@@ -55612,7 +55560,7 @@
       "type": "string"
     },
     "Location": {
-      "markdownDescription": "*[Custom snapshot policies only]* Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify `CLOUD` . To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL` . If you omit this parameter, `CLOUD` is used by default.\n\nIf the policy targets resources in an AWS Region , then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost.",
+      "markdownDescription": "*[Custom snapshot policies only]* Specifies the destination for snapshots created by the policy. 
The allowed destinations depend on the location of the targeted resources.\n\n- If the policy targets resources in a Region, then you must create snapshots in the same Region as the source resource.\n- If the policy targets resources in a Local Zone, you can create snapshots in the same Local Zone or in its parent Region.\n- If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost or in its parent Region.\n\nSpecify one of the following values:\n\n- To create snapshots in the same Region as the source resource, specify `CLOUD` .\n- To create snapshots in the same Local Zone as the source resource, specify `LOCAL_ZONE` .\n- To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL` .\n\nDefault: `CLOUD`", "title": "Location", "type": "string" }, @@ -55989,7 +55937,7 @@ "items": { "type": "string" }, - "markdownDescription": "*[Custom snapshot and AMI policies only]* The location of the resources to backup. If the source resources are located in an AWS Region , specify `CLOUD` . If the source resources are located on an Outpost in your account, specify `OUTPOST` .\n\nIf you specify `OUTPOST` , Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account.", + "markdownDescription": "*[Custom snapshot and AMI policies only]* The location of the resources to backup.\n\n- If the source resources are located in a Region, specify `CLOUD` . In this case, the policy targets all resources of the specified type with matching target tags across all Availability Zones in the Region.\n- *[Custom snapshot policies only]* If the source resources are located in a Local Zone, specify `LOCAL_ZONE` . In this case, the policy targets all resources of the specified type with matching target tags across all Local Zones in the Region.\n- If the source resources are located on an Outpost in your account, specify `OUTPOST` . In this case, the policy targets all resources of the specified type with matching target tags across all of the Outposts in your account.", "title": "ResourceLocations", "type": "array" }, @@ -56094,7 +56042,7 @@ "items": { "$ref": "#/definitions/AWS::DLM::LifecyclePolicy.CrossRegionCopyRule" }, - "markdownDescription": "Specifies a rule for copying snapshots or AMIs across regions.\n\n> You can't specify cross-Region copy rules for policies that create snapshots on an Outpost. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.", + "markdownDescription": "Specifies a rule for copying snapshots or AMIs across Regions.\n\n> You can't specify cross-Region copy rules for policies that create snapshots on an Outpost or in a Local Zone. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.", "title": "CrossRegionCopyRules", "type": "array" }, @@ -61738,7 +61686,7 @@ "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). 
You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", + "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location).\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "title": "Subdirectory", "type": "string" }, @@ -61836,7 +61784,7 @@ "additionalProperties": false, "properties": { "FsxFilesystemArn": { - "markdownDescription": "The Amazon Resource Name (ARN) for the FSx for Lustre file system.", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system.", "title": "FsxFilesystemArn", "type": "string" }, @@ -61849,7 +61797,7 @@ "type": "array" }, "Subdirectory": { - "markdownDescription": "A subdirectory in the location's path. This subdirectory in the FSx for Lustre file system is used to read data from the FSx for Lustre source location or write data to the FSx for Lustre destination.", + "markdownDescription": "Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.\n\nWhen the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory ( `/` ).", "title": "Subdirectory", "type": "string" }, @@ -61857,7 +61805,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.", + "markdownDescription": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location.", "title": "Tags", "type": "array" } @@ -61942,7 +61890,7 @@ "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a path to the file share in the SVM where you'll copy your data.\n\nYou can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be `/vol1` , `/vol1/tree1` , or `/share1` .\n\n> Don't specify a junction path in the SVM's root volume. For more information, see [Managing FSx for ONTAP storage virtual machines](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html) in the *Amazon FSx for NetApp ONTAP User Guide* .", + "markdownDescription": "Specifies a path to the file share in the SVM where you want to transfer data to or from.\n\nYou can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be `/vol1` , `/vol1/tree1` , or `/share1` .\n\n> Don't specify a junction path in the SVM's root volume. 
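A minimal sketch of the `AWS::DataSync::LocationFSxLustre` properties documented above; both ARNs are placeholders, and `Subdirectory` is optional as noted:

```yaml
Resources:
  DemoLustreLocation:
    Type: AWS::DataSync::LocationFSxLustre
    Properties:
      FsxFilesystemArn: arn:aws:fsx:us-east-1:123456789012:file-system/fs-0123456789abcdef0  # placeholder
      SecurityGroupArns:
        - arn:aws:ec2:us-east-1:123456789012:security-group/sg-0123456789abcdef0             # placeholder
      Subdirectory: /exports/data   # optional; omit to use the file system's root directory (/)
      Tags:
        - Key: Name                 # a name tag is recommended, per the description above
          Value: lustre-source-location
```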
For more information, see [Managing FSx for ONTAP storage virtual machines](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html) in the *Amazon FSx for NetApp ONTAP User Guide* .", "title": "Subdirectory", "type": "string" }, @@ -62027,7 +61975,7 @@ "additionalProperties": false, "properties": { "Domain": { - "markdownDescription": "Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to.\n\nIf you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM.", + "markdownDescription": "Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.", "title": "Domain", "type": "string" }, @@ -62231,7 +62179,7 @@ "additionalProperties": false, "properties": { "Domain": { - "markdownDescription": "Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.", + "markdownDescription": "Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.", "title": "Domain", "type": "string" }, @@ -71829,7 +71777,7 @@ "type": "string" }, "PropagateTagsToVolumeOnCreation": { - "markdownDescription": "Indicates whether to assign the tags from the instance to all of the volumes attached to the instance at launch. If you specify `true` and you assign tags to the instance, those tags are automatically assigned to all of the volumes that you attach to the instance at launch. If you specify `false` , those tags are not assigned to the attached volumes.", + "markdownDescription": "Indicates whether to assign the tags specified in the `Tags` property to the volumes specified in the `BlockDeviceMappings` property.\n\nNote that using this feature does not assign the tags to volumes that are created separately and then attached using `AWS::EC2::VolumeAttachment` .", "title": "PropagateTagsToVolumeOnCreation", "type": "boolean" }, @@ -76993,7 +76941,7 @@ "type": "string" }, "GroupName": { - "markdownDescription": "The name of the security group.\n\nConstraints: Up to 255 characters in length. Cannot start with `sg-` .\n\nValid characters: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*", + "markdownDescription": "[Default VPC] The name of the security group. For security groups for a default VPC, you can specify either the ID or the name of the security group. For security groups for a nondefault VPC, you must specify the ID of the security group.", "title": "GroupName", "type": "string" }, @@ -77453,7 +77401,7 @@ "type": "array" }, "SecondaryPrivateIpAddressCount": { - "markdownDescription": "The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option.
You cannot specify this option if you're launching more than one instance in a [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) request.", + "markdownDescription": "The number of secondary private IPv4 addresses. You can\u2019t specify this parameter and also specify a secondary private IP address using the `PrivateIpAddress` parameter.", "title": "SecondaryPrivateIpAddressCount", "type": "number" }, @@ -83157,7 +83105,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::Cluster.ClusterSettings" }, - "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.", + "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights with enhanced observability or CloudWatch Container Insights for a cluster.\n\nContainer Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup.\n\nFor more information, see [Monitor Amazon ECS containers using Container Insights with enhanced observability](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "ClusterSettings", "type": "array" }, @@ -83251,7 +83199,7 @@ "type": "string" }, "Value": { - "markdownDescription": "The value to set for the cluster setting. The supported values are `enabled` and `disabled` .\n\nIf you set `name` to `containerInsights` and `value` to `enabled` , CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the `containerInsights` account setting is turned on. If a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", + "markdownDescription": "The value to set for the cluster setting. The supported values are `enhanced` , `enabled` , and `disabled` .\n\nTo use Container Insights with enhanced observability, set the `containerInsights` account setting to `enhanced` .\n\nTo use Container Insights, set the `containerInsights` account setting to `enabled` .\n\nIf a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", "title": "Value", "type": "string" } @@ -83548,7 +83496,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::Service.CapacityProviderStrategyItem" }, - "markdownDescription": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted.
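Putting the `containerInsights` setting above into template form, a minimal hypothetical cluster sketch (the cluster name is a placeholder):

```yaml
Resources:
  DemoCluster:
    Type: AWS::ECS::Cluster
    Properties:
      ClusterName: demo-cluster        # hypothetical name
      ClusterSettings:
        - Name: containerInsights
          Value: enhanced              # or enabled | disabled, per the description above
```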
If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy may contain a maximum of 6 capacity providers.", + "markdownDescription": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted. If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy can contain a maximum of 20 capacity providers.", "title": "CapacityProviderStrategy", "type": "array" }, @@ -83707,7 +83655,7 @@ "additionalProperties": false, "properties": { "AssignPublicIp": { - "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", + "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `ENABLED` .", "title": "AssignPublicIp", "type": "string" }, @@ -83814,7 +83762,7 @@ "title": "DeploymentCircuitBreaker" }, "MaximumPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. 
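A hedged sketch of an `AWS::ECS::Service` that follows the rules in the hunks above: a capacity provider strategy is specified, so `LaunchType` is omitted. The cluster and task definition references, subnet, and security group are placeholders:

```yaml
Resources:
  DemoService:
    Type: AWS::ECS::Service
    Properties:
      Cluster: !Ref DemoCluster            # hypothetical cluster
      TaskDefinition: !Ref DemoTaskDef     # hypothetical task definition
      DesiredCount: 2
      # LaunchType is omitted because CapacityProviderStrategy is specified
      CapacityProviderStrategy:
        - CapacityProvider: FARGATE
          Base: 1
          Weight: 1
        - CapacityProvider: FARGATE_SPOT
          Weight: 4
      NetworkConfiguration:
        AwsvpcConfiguration:
          AssignPublicIp: DISABLED         # valid values: ENABLED | DISABLED
          Subnets:
            - subnet-0123456789abcdef0     # placeholder
          SecurityGroups:
            - sg-0123456789abcdef0         # placeholder
```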
For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the service uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.", "title": "MaximumPercent", "type": "number" }, @@ -83900,7 +83848,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. 
Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. 
For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs.
The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84824,7 +84772,7 @@ "additionalProperties": false, "properties": { "SizeInGiB": { - "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. 
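The `awslogs` options described above combine as in this hypothetical container-definition fragment of an `AWS::ECS::TaskDefinition`; the log group name, Region, and image are placeholders:

```yaml
ContainerDefinitions:
  - Name: web
    Image: public.ecr.aws/nginx/nginx:latest   # placeholder image
    LogConfiguration:
      LogDriver: awslogs
      Options:
        awslogs-group: /ecs/demo-app           # must exist unless awslogs-create-group is "true"
        awslogs-create-group: "true"           # requires the logs:CreateLogGroup permission
        awslogs-region: us-east-1              # placeholder Region
        awslogs-stream-prefix: web             # stream becomes web/web/<task-id>
        mode: non-blocking                     # buffer logs instead of blocking stdout/stderr
        max-buffer-size: 25m                   # in-memory buffer used by non-blocking mode
```

With `mode: non-blocking`, writes to `stdout` and `stderr` never block the application, at the cost of possible log loss once the buffer fills.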
The minimum supported value is `20` GiB and the maximum supported value is `200` GiB.", + "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB.", "title": "SizeInGiB", "type": "number" } @@ -85070,7 +85018,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. 
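A minimal Fargate task definition sketch using the `SizeInGiB` bounds given above; the family name and image are placeholders:

```yaml
Resources:
  DemoTaskDef:
    Type: AWS::ECS::TaskDefinition
    Properties:
      Family: demo-task                  # hypothetical family name
      RequiresCompatibilities:
        - FARGATE
      NetworkMode: awsvpc
      Cpu: "1024"
      Memory: "2048"
      EphemeralStorage:
        SizeInGiB: 100                   # valid range is 21-200 GiB, per the description above
      ContainerDefinitions:
        - Name: app
          Image: public.ecr.aws/docker/library/busybox:latest   # placeholder image
          Essential: true
```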
The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. 
When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format.
A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. 
Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -85516,7 +85464,7 @@ "additionalProperties": false, "properties": { "AssignPublicIp": { - "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", + "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `ENABLED` .", "title": "AssignPublicIp", "type": "string" }, @@ -85959,7 +85907,7 @@ "additionalProperties": false, "properties": { "ReplicationOverwriteProtection": { - "markdownDescription": "The status of the file system's replication overwrite protection.\n\n- `ENABLED` \u2013 The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is `ENABLED` by default.\n- `DISABLED` \u2013 The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.\n- `REPLICATING` \u2013 The file system is being used as the destination file system in a replication configuration. The file system is read-only and is only modified only by EFS replication.\n\nIf the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, the file system becomes writeable.", + "markdownDescription": "The status of the file system's replication overwrite protection.\n\n- `ENABLED` \u2013 The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. 
Replication overwrite protection is `ENABLED` by default.\n- `DISABLED` \u2013 The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.\n- `REPLICATING` \u2013 The file system is being used as the destination file system in a replication configuration. The file system is read-only and is modified only by EFS replication.\n\nIf the replication configuration is deleted, the file system's replication overwrite protection is re-enabled and the file system becomes writeable.", "title": "ReplicationOverwriteProtection", "type": "string" } @@ -86552,7 +86500,7 @@ "additionalProperties": false, "properties": { "IpFamily": { - "markdownDescription": "Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both `IPv4` and `IPv6` CIDR blocks assigned to them. You can't specify `ipv6` for clusters in China Regions.\n\nYou can only specify `ipv6` for `1.21` and later clusters that use version `1.10.1` or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements listed in the considerations listed in [Assigning IPv6 addresses to pods and services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the Amazon EKS User Guide. Kubernetes assigns services `IPv6` addresses from the unique local address range `(fc00::/7)` . You can't specify a custom `IPv6` CIDR block. Pod addresses are assigned from the subnet's `IPv6` CIDR.", + "markdownDescription": "Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both `IPv4` and `IPv6` CIDR blocks assigned to them. You can't specify `ipv6` for clusters in China Regions.\n\nYou can only specify `ipv6` for `1.21` and later clusters that use version `1.10.1` or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements and considerations listed in [Assigning IPv6 addresses to pods and services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the *Amazon EKS User Guide* . Kubernetes assigns services `IPv6` addresses from the unique local address range `(fc00::/7)` . You can't specify a custom `IPv6` CIDR block. Pod addresses are assigned from the subnet's `IPv6` CIDR.", "title": "IpFamily", "type": "string" }, @@ -94366,7 +94314,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` .
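A minimal sketch of the `ReplicationOverwriteProtection` setting described above, on a hypothetical file system intended to serve as a replication destination:

```yaml
Resources:
  DemoReplicaFileSystem:
    Type: AWS::EFS::FileSystem
    Properties:
      Encrypted: true
      FileSystemProtection:
        ReplicationOverwriteProtection: DISABLED  # allow this file system to be a replication destination
```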
The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). 
The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. 
The value you set for both attributes must be the same.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). 
The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` . This attribute can't be enabled for UDP and TCP_UDP target groups.\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. 
The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.", "title": "Key", "type": "string" }, @@ -96541,7 +96489,7 @@ "properties": { "AuthParameters": { "$ref": "#/definitions/AWS::Events::Connection.AuthParameters", - "markdownDescription": "A `CreateConnectionAuthRequestParameters` object that contains the authorization parameters to use to authorize with the endpoint.", + "markdownDescription": "The authorization parameters to use to authorize with the endpoint.\n\nYou must include only authorization parameters for the `AuthorizationType` you specify.", "title": "AuthParameters" }, "AuthorizationType": { @@ -96675,7 +96623,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Connection.Parameter" }, - "markdownDescription": "Contains additional body string parameters for the connection.", + "markdownDescription": "Any additional body string parameters for the connection.", "title": "BodyParameters", "type": "array" }, @@ -96683,7 +96631,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Connection.Parameter" }, - "markdownDescription": "Contains additional header parameters for the connection.", + "markdownDescription": "Any additional header parameters for the connection.", "title": "HeaderParameters", "type": "array" }, @@ -96691,7 +96639,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Connection.Parameter" }, - "markdownDescription": "Contains additional query string parameters for the connection.", + "markdownDescription": "Any additional query string parameters for the connection.", "title": "QueryStringParameters", "type": "array" } @@ -96708,7 +96656,7 @@ }, "ClientParameters": { "$ref": "#/definitions/AWS::Events::Connection.ClientParameters", - "markdownDescription": "A `CreateConnectionOAuthClientRequestParameters` object that contains the client parameters for OAuth authorization.", + "markdownDescription": "The client parameters for OAuth authorization.", "title": "ClientParameters" }, "HttpMethod": { @@ -96718,7 +96666,7 @@ }, "OAuthHttpParameters": { "$ref": "#/definitions/AWS::Events::Connection.ConnectionHttpParameters", - "markdownDescription": "A `ConnectionHttpParameters` object that contains details about the additional parameters to use for the connection.", + "markdownDescription": "Details about the additional parameters to use for the connection.", "title": "OAuthHttpParameters" } }, @@ -97693,12 +97641,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "Name of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Name of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "Value of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Value of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Value", "type": "string" } @@ -97716,7 +97664,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Rule.SageMakerPipelineParameter" }, - "markdownDescription": "List of Parameter names and values for SageMaker Model Building Pipeline 
execution.", + "markdownDescription": "List of Parameter names and values for SageMaker AI Model Building Pipeline execution.", "title": "PipelineParameterList", "type": "array" } @@ -97802,7 +97750,7 @@ }, "RetryPolicy": { "$ref": "#/definitions/AWS::Events::Rule.RetryPolicy", - "markdownDescription": "The `RetryPolicy` object that contains the retry policy configuration to use for the dead-letter queue.", + "markdownDescription": "The retry policy configuration to use for the dead-letter queue.", "title": "RetryPolicy" }, "RoleArn": { @@ -97817,7 +97765,7 @@ }, "SageMakerPipelineParameters": { "$ref": "#/definitions/AWS::Events::Rule.SageMakerPipelineParameters", - "markdownDescription": "Contains the SageMaker Model Building Pipeline parameters to start execution of a SageMaker Model Building Pipeline.\n\nIf you specify a SageMaker Model Building Pipeline as a target, you can use this to specify parameters to start a pipeline execution based on EventBridge events.", + "markdownDescription": "Contains the SageMaker AI Model Building Pipeline parameters to start execution of a SageMaker AI Model Building Pipeline.\n\nIf you specify a SageMaker AI Model Building Pipeline as a target, you can use this to specify parameters to start a pipeline execution based on EventBridge events.", "title": "SageMakerPipelineParameters" }, "SqsParameters": { @@ -99714,7 +99662,7 @@ "type": "boolean" }, "DataRepositoryPath": { - "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://bucket-name/prefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", + "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://myBucket/myPrefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", "title": "DataRepositoryPath", "type": "string" }, @@ -99909,7 +99857,7 @@ "type": "number" }, "StorageType": { - "markdownDescription": "Sets the storage type for the file system that you're creating. Valid values are `SSD` and `HDD` .\n\n- Set to `SSD` to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.\n- Set to `HDD` to use hard disk drive storage. HDD is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types, and on `PERSISTENT_1` Lustre file system deployment types.\n\nDefault value is `SSD` . For more information, see [Storage type options](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) in the *FSx for Windows File Server User Guide* and [Multiple storage options](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) in the *FSx for Lustre User Guide* .", + "markdownDescription": "Sets the storage class for the file system that you're creating. Valid values are `SSD` , `HDD` , and `INTELLIGENT_TIERING` .\n\n- Set to `SSD` to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.\n- Set to `HDD` to use hard disk drive storage. HDD is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types, and on `PERSISTENT_1` Lustre file system deployment types.\n- Set to `INTELLIGENT_TIERING` to use fully elastic, intelligently-tiered storage. 
Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type.\n\nDefault value is `SSD` . For more information, see [Storage type options](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) in the *FSx for Windows File Server User Guide* , [Multiple storage options](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) in the *FSx for Lustre User Guide* , and [Working with Intelligent-Tiering](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance-intelligent-tiering) in the *Amazon FSx for OpenZFS User Guide* .", "title": "StorageType", "type": "string" }, @@ -100952,7 +100900,7 @@ "type": "boolean" }, "RecordSizeKiB": { - "markdownDescription": "Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see [ZFS Record size](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance) in the *Amazon FSx for OpenZFS User Guide* .", + "markdownDescription": "Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). For file systems using the Intelligent-Tiering storage class, valid values are 128, 256, 512, 1024, 2048, or 4096 KiB, with a default of 1024 KiB. For all other file systems, valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB, with a default of 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see [ZFS Record size](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance) in the *Amazon FSx for OpenZFS User Guide* .", "title": "RecordSizeKiB", "type": "number" }, @@ -105324,7 +105272,7 @@ "type": "object" }, "ConnectionType": { - "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . 
These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . 
These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\nAdditionally, a `ConnectionType` for the following SaaS connectors is supported:\n\n- `FACEBOOKADS` - Designates a connection to Facebook Ads.\n- `GOOGLEADS` - Designates a connection to Google Ads.\n- `GOOGLESHEETS` - Designates a connection to Google Sheets.\n- `GOOGLEANALYTICS4` - Designates a connection to Google Analytics 4.\n- `HUBSPOT` - Designates a connection to HubSpot.\n- `INSTAGRAMADS` - Designates a connection to Instagram Ads.\n- `INTERCOM` - Designates a connection to Intercom.\n- `JIRACLOUD` - Designates a connection to Jira Cloud.\n- `MARKETO` - Designates a connection to Adobe Marketo Engage.\n- `NETSUITEERP` - Designates a connection to Oracle NetSuite.\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n- `SALESFORCEMARKETINGCLOUD` - Designates a connection to Salesforce Marketing Cloud.\n- `SALESFORCEPARDOT` - Designates a connection to Salesforce Marketing Cloud Account Engagement (MCAE).\n- `SAPODATA` - Designates a connection to SAP OData.\n- `SERVICENOW` - Designates a connection to ServiceNow.\n- `SLACK` - Designates a connection to Slack.\n- `SNAPCHATADS` - Designates a connection to Snapchat Ads.\n- `STRIPE` - Designates a connection to Stripe.\n- `ZENDESK` - Designates a connection to Zendesk.\n- `ZOHOCRM` - Designates a connection to Zoho CRM.\n\nFor more information on the connection parameters needed for a particular 
connector, see the documentation for the connector in [Adding an AWS Glue connection](https://docs.aws.amazon.com/glue/latest/dg/console-connections.html) in the AWS Glue User Guide.\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", "title": "ConnectionType", "type": "string" }, @@ -106587,7 +106535,7 @@ "type": "number" }, "WorkerType": { - "markdownDescription": "The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.\n\n- For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.\n- For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.\n- For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).\n- For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the `G.4X` worker type.\n- For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 streaming jobs.\n- For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.", + "markdownDescription": "The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.\n\n- For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. 
We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.\n- For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.\n- For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).\n- For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the `G.4X` worker type.\n- For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs.\n- For the `Z.2X` worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.",
 "title": "WorkerType",
 "type": "string"
 }
@@ -110783,7 +110731,7 @@
 },
 "SageMakerMachineLearningModelResourceData": {
 "$ref": "#/definitions/AWS::Greengrass::ResourceDefinition.SageMakerMachineLearningModelResourceData",
- "markdownDescription": "Settings for a machine learning resource saved as an SageMaker training job.",
+ "markdownDescription": "Settings for a machine learning resource saved as a SageMaker AI training job.",
 "title": "SageMakerMachineLearningModelResourceData"
 },
 "SecretsManagerSecretResourceData": {
@@ -110896,7 +110844,7 @@
 "title": "OwnerSetting"
 },
 "SageMakerJobArn": {
- "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model.",
+ "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SageMaker AI training job that represents the source model.",
 "title": "SageMakerJobArn",
 "type": "string"
 }
@@ -111088,7 +111036,7 @@
 },
 "SageMakerMachineLearningModelResourceData": {
 "$ref": "#/definitions/AWS::Greengrass::ResourceDefinitionVersion.SageMakerMachineLearningModelResourceData",
- "markdownDescription": "Settings for a machine learning resource saved as an SageMaker training job.",
+ "markdownDescription": "Settings for a machine learning resource saved as a SageMaker AI training job.",
 "title": "SageMakerMachineLearningModelResourceData"
 },
 "SecretsManagerSecretResourceData": {
@@ -111184,7 +111132,7 @@
 "title": "OwnerSetting"
 },
 "SageMakerJobArn": {
- "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model.",
+ "markdownDescription": "The Amazon Resource Name 
(ARN) of the Amazon SageMaker AI training job that represents the source model.", "title": "SageMakerJobArn", "type": "string" } @@ -117162,7 +117110,7 @@ "additionalProperties": false, "properties": { "RepositoryName": { - "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location.", + "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location. For example, `/repository_name` .", "title": "RepositoryName", "type": "string" }, @@ -117509,7 +117457,7 @@ "additionalProperties": false, "properties": { "RepositoryName": { - "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location.", + "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location. For example, `/repository_name` .", "title": "RepositoryName", "type": "string" }, @@ -117686,7 +117634,7 @@ "type": "boolean" }, "TimeoutMinutes": { - "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeoutMinutes attribute is not currently active. This value is ignored.", + "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeout attribute is not currently active. This value is ignored.", "title": "TimeoutMinutes", "type": "number" } @@ -117927,7 +117875,7 @@ "type": "boolean" }, "TimeoutMinutes": { - "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeoutMinutes attribute is not currently active. This value is ignored.", + "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeout attribute is not currently active. This value is ignored.", "title": "TimeoutMinutes", "type": "number" } @@ -119855,7 +119803,7 @@ "properties": { "HealthEventsConfig": { "$ref": "#/definitions/AWS::InternetMonitor::Monitor.HealthEventsConfig", - "markdownDescription": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores,, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. 
In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .",
+ "markdownDescription": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .",
 "title": "HealthEventsConfig"
 },
 "IncludeLinkedAccounts": {
@@ -136420,7 +136368,7 @@
 "type": "array"
 },
 "Edition": {
- "markdownDescription": "Indicates whether the index is a Enterprise Edition index or a Developer Edition index. Valid values are `DEVELOPER_EDITION` and `ENTERPRISE_EDITION` .",
+ "markdownDescription": "Indicates whether the index is an Enterprise Edition index, a Developer Edition index, or a GenAI Enterprise Edition index.",
 "title": "Edition",
 "type": "string"
 },
@@ -149540,7 +149488,7 @@
 "additionalProperties": false,
 "properties": {
 "PolicyDocument": {
- "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with an `Deidentify` action. 
The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- An Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.",
+ "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with a `Deidentify` action. The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. 
The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- A Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.\n\n*Field index policy*\n\nA field index filter policy can include the following attribute in a JSON block:\n\n- *Fields* The array of field indexes to create.\n\nThe following is an example of an index policy document that creates two indexes, `RequestId` and `TransactionId` .\n\n`\"policyDocument\": \"{ \\\"Fields\\\": [ \\\"RequestId\\\", \\\"TransactionId\\\" ] }\"`\n\n*Transformer policy*\n\nA transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see [Processors that you can use](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-Processors) .",
 "title": "PolicyDocument",
 "type": "string"
 },
 "type": "string"
 },
 "Scope": {
- "markdownDescription": "Currently the only valid value for this parameter is `ALL` , which specifies that the policy applies to all log groups in the account. If you omit this parameter, the default of `ALL` is used. To scope down a subscription filter policy to a subset of log groups, use the `selectionCriteria` parameter.",
+ "markdownDescription": "Currently the only valid value for this parameter is `ALL` , which specifies that the policy applies to all log groups in the account. If you omit this parameter, the default of `ALL` is used. 
To scope down a subscription filter policy to a subset of log groups, use the `SelectionCriteria` parameter.", "title": "Scope", "type": "string" }, "SelectionCriteria": { - "markdownDescription": "Use this parameter to apply a subscription filter policy to a subset of log groups in the account. Currently, the only supported filter is `LogGroupName NOT IN []` . The `selectionCriteria` string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.\n\nUsing the `selectionCriteria` parameter is useful to help prevent infinite loops. For more information, see [Log recursion prevention](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions-recursion-prevention.html) .\n\nSpecifing `selectionCriteria` is valid only when you specify `SUBSCRIPTION_FILTER_POLICY` for `policyType` .", + "markdownDescription": "Use this parameter to apply the new policy to a subset of log groups in the account.\n\nYou need to specify `SelectionCriteria` only when you specify `SUBSCRIPTION_FILTER_POLICY` , `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY` for `PolicyType` .\n\nIf `PolicyType` is `SUBSCRIPTION_FILTER_POLICY` , the only supported `SelectionCriteria` filter is `LogGroupName NOT IN []`\n\nIf `PolicyType` is `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY` , the only supported `SelectionCriteria` filter is `LogGroupNamePrefix`\n\nThe `SelectionCriteria` string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.\n\nUsing the `SelectionCriteria` parameter with `SUBSCRIPTION_FILTER_POLICY` is useful to help prevent infinite loops. For more information, see [Log recursion prevention](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions-recursion-prevention.html) .", "title": "SelectionCriteria", "type": "string" } @@ -149642,7 +149590,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags that have been assigned to this delivery.", + "markdownDescription": "An array of key-value pairs to apply to the delivery.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", "type": "array" } @@ -149710,12 +149658,12 @@ "additionalProperties": false, "properties": { "DeliveryDestinationPolicy": { - "markdownDescription": "A structure that contains information about one delivery destination policy.", + "markdownDescription": "An IAM policy that grants permissions to CloudWatch Logs to deliver logs cross-account to a specified destination in this account. For examples of this policy, see [Examples](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html#API_PutDeliveryDestinationPolicy_Examples) in the CloudWatch Logs API Reference.", "title": "DeliveryDestinationPolicy", "type": "object" }, "DestinationResourceArn": { - "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.", + "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. 
That AWS destination can be a log group in CloudWatch Logs , an Amazon S3 bucket, or a Firehose stream.", "title": "DestinationResourceArn", "type": "string" }, @@ -149728,7 +149676,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags that have been assigned to this delivery destination.", + "markdownDescription": "An array of key-value pairs to apply to the delivery destination.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", "type": "array" } @@ -149805,7 +149753,7 @@ "type": "string" }, "ResourceArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the AWS resource that is generating and sending logs. For example, `arn:aws:workmail:us-east-1:123456789012:organization/m-1234EXAMPLEabcd1234abcd1234abcd1234`", "title": "ResourceArn", "type": "string" }, @@ -149813,7 +149761,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags that have been assigned to this delivery source.", + "markdownDescription": "An array of key-value pairs to apply to the delivery source.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", "type": "array" } @@ -165497,7 +165445,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Valkey or Redis OSS engine version used by the cluster .", + "markdownDescription": "The Redis engine version used by the cluster .", "title": "EngineVersion", "type": "string" }, @@ -174871,7 +174819,7 @@ "additionalProperties": false, "properties": { "Content": { - "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", + "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. 
The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Resource control policies: 5,120 characters\n- Declarative policies: 10,000 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", "title": "Content", "type": "object" }, @@ -182522,7 +182470,7 @@ }, "SageMakerPipelineParameters": { "$ref": "#/definitions/AWS::Pipes::Pipe.PipeTargetSageMakerPipelineParameters", - "markdownDescription": "The parameters for using a SageMaker pipeline as a target.", + "markdownDescription": "The parameters for using a SageMaker AI pipeline as a target.", "title": "SageMakerPipelineParameters" }, "SqsQueueParameters": { @@ -182588,7 +182536,7 @@ "items": { "$ref": "#/definitions/AWS::Pipes::Pipe.SageMakerPipelineParameter" }, - "markdownDescription": "List of Parameter names and values for SageMaker Model Building Pipeline execution.", + "markdownDescription": "List of Parameter names and values for SageMaker AI Model Building Pipeline execution.", "title": "PipelineParameterList", "type": "array" } @@ -182684,12 +182632,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "Name of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Name of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "Value of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Value of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Value", "type": "string" } @@ -209341,28 +209289,18 @@ "additionalProperties": false, "properties": { "ContainsHeader": { - "markdownDescription": "Whether the file has a header row, or the files each have a header row.", - "title": "ContainsHeader", "type": "boolean" }, "Delimiter": { - "markdownDescription": "The delimiter between values in the file.", - "title": "Delimiter", "type": "string" }, "Format": { - "markdownDescription": "File format.", - "title": "Format", "type": "string" }, "StartFromRow": { - "markdownDescription": "A row number to start reading data from.", - "title": "StartFromRow", "type": "number" }, "TextQualifier": { - "markdownDescription": "Text qualifier.", - "title": "TextQualifier", "type": "string" } }, @@ -224540,7 +224478,7 @@ "type": "array" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "markdownDescription": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. 
By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters",
 "title": "AutoMinorVersionUpgrade",
 "type": "boolean"
 },
@@ -224682,7 +224620,7 @@
 },
 "MasterUserSecret": {
 "$ref": "#/definitions/AWS::RDS::DBCluster.MasterUserSecret",
- "markdownDescription": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*",
+ "markdownDescription": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\n> When you restore a DB cluster from a snapshot, Amazon RDS generates a new secret instead of reusing the secret specified in the `SecretArn` property. This ensures that the restored DB cluster is securely managed with a dedicated secret. To maintain consistent integration with your application, you might need to update resource configurations to reference the newly created secret. \n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*",
 "title": "MasterUserSecret"
 },
 "MasterUsername": {
 "type": "string"
 },
 "MonitoringInterval": {
- "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, also set `MonitoringInterval` to a value other than `0` .\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`",
+ "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, also set `MonitoringInterval` to a value other than `0` .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`",
 "title": "MonitoringInterval",
 "type": "number"
 },
 "MonitoringRoleArn": {
- "markdownDescription": "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is `arn:aws:iam:123456789012:role/emaccess` . For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , supply a `MonitoringRoleArn` value.\n\nValid for Cluster Type: Multi-AZ DB clusters only",
+ "markdownDescription": "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is `arn:aws:iam::123456789012:role/emaccess` . 
For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , supply a `MonitoringRoleArn` value.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "MonitoringRoleArn", "type": "string" }, @@ -224706,17 +224644,17 @@ "type": "string" }, "PerformanceInsightsEnabled": { - "markdownDescription": "Specifies whether to turn on Performance Insights for the DB cluster.\n\nFor more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "markdownDescription": "Specifies whether to turn on Performance Insights for the DB cluster.\n\nFor more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "PerformanceInsightsEnabled", "type": "boolean" }, "PerformanceInsightsKmsKeyId": { - "markdownDescription": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you don't specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account . Your AWS account has a different default KMS key for each AWS Region .\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "markdownDescription": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you don't specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account . Your AWS account has a different default KMS key for each AWS Region .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "PerformanceInsightsKmsKeyId", "type": "string" }, "PerformanceInsightsRetentionPeriod": { - "markdownDescription": "The number of days to retain Performance Insights data.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS issues an error.", + "markdownDescription": "The number of days to retain Performance Insights data.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. 
Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS issues an error.",
        "title": "PerformanceInsightsRetentionPeriod",
        "type": "number"
      },
@@ -230018,7 +229956,7 @@
        "type": "array"
      },
      "InvokerRoleName": {
-        "markdownDescription": "Existing AWS IAM role name in the primary AWS account that will be assumed by AWS Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment.\n\n> - You must have `iam:passRole` permission for this role while creating or updating the application.\n> - Currently, `invokerRoleName` accepts only `[A-Za-z0-9_+=,.@-]` characters.",
+        "markdownDescription": "Existing AWS IAM role name in the primary AWS account that will be assumed by the AWS Resilience Hub Service Principal to obtain read-only access to your application resources while running an assessment.\n\nIf your IAM role includes a path, you must include the path in the `invokerRoleName` parameter. For example, if your IAM role's ARN is `arn:aws:iam::123456789012:role/my-path/role-name` , you should pass `my-path/role-name` .\n\n> - You must have `iam:passRole` permission for this role while creating or updating the application.\n> - Currently, `invokerRoleName` accepts only `[A-Za-z0-9_+=,.@-]` characters.",
        "title": "InvokerRoleName",
        "type": "string"
      },
@@ -235509,7 +235447,7 @@
        "title": "BucketEncryption"
      },
      "BucketName": {
-        "markdownDescription": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. The bucket name must contain only lowercase letters, numbers, periods (.), and dashes (-) and must follow [Amazon S3 bucket restrictions and limitations](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) . For more information, see [Rules for naming Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.",
+        "markdownDescription": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. The bucket name must contain only lowercase letters, numbers, periods (.), and dashes (-) and must follow [Amazon S3 bucket restrictions and limitations](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) . For more information, see [Rules for naming Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.",
        "title": "BucketName",
        "type": "string"
      },
@@ -238059,17 +237997,17 @@
      "additionalProperties": false,
      "properties": {
        "BucketName": {
-          "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. 
The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.",
+          "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Zone (Availability Zone or Local Zone). The bucket name must also follow the format `*bucket_base_name* -- *zone_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.",
          "title": "BucketName",
          "type": "string"
        },
        "DataRedundancy": {
-          "markdownDescription": "The number of Availability Zone that's used for redundancy for the bucket.",
+          "markdownDescription": "The number of Zones (Availability Zones or Local Zones) that are used for redundancy for the bucket.",
          "title": "DataRedundancy",
          "type": "string"
        },
        "LocationName": {
-          "markdownDescription": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the AZ ID of the Availability Zone where the bucket will be created. An example AZ ID value is `usw2-az1` .",
+          "markdownDescription": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the Zone ID of the Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An example AZ ID value is `usw2-az1` .",
          "title": "LocationName",
          "type": "string"
        }
@@ -240417,7 +240355,7 @@
        "type": "object"
      },
      "FilterPolicyScope": {
-        "markdownDescription": "This attribute lets you choose the filtering scope by using one of the following string value types:\n\n- `MessageAttributes` (default) - The filter is applied on the message attributes.\n- `MessageBody` - The filter is applied on the message body.",
+        "markdownDescription": "This attribute lets you choose the filtering scope by using one of the following string value types:\n\n- `MessageAttributes` (default) - The filter is applied on the message attributes.\n- `MessageBody` - The filter is applied on the message body.\n\n> `Null` is not a valid value for `FilterPolicyScope` . 
To delete a filter policy, delete the `FilterPolicy` property but keep the `FilterPolicyScope` property as is.",
        "title": "FilterPolicyScope",
        "type": "string"
      },
@@ -244322,7 +244260,7 @@
        "type": "string"
      },
      "SageMakerImageArn": {
-        "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.",
+        "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.",
        "title": "SageMakerImageArn",
        "type": "string"
      },
@@ -244386,7 +244324,7 @@
      },
      "KernelGatewayImageConfig": {
        "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig",
-        "markdownDescription": "The configuration for the file system and kernels in the SageMaker image.",
+        "markdownDescription": "The configuration for the file system and kernels in the SageMaker AI image.",
        "title": "KernelGatewayImageConfig"
      },
      "Tags": {
@@ -244522,7 +244460,7 @@
      "properties": {
        "FileSystemConfig": {
          "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.FileSystemConfig",
-          "markdownDescription": "The Amazon Elastic File System storage configuration for a SageMaker image.",
+          "markdownDescription": "The Amazon Elastic File System storage configuration for a SageMaker AI image.",
          "title": "FileSystemConfig"
        },
        "KernelSpecs": {
@@ -245053,7 +244991,7 @@
      "additionalProperties": false,
      "properties": {
        "KmsKeyId": {
-          "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
+          "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
          "title": "KmsKeyId",
          "type": "string"
        },
@@ -245433,7 +245371,7 @@
      "additionalProperties": false,
      "properties": {
        "AppNetworkAccessType": {
-          "markdownDescription": "Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`",
+          "markdownDescription": "Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`",
          "title": "AppNetworkAccessType",
          "type": "string"
        },
@@ -245736,7 +245674,7 @@
        "type": "string"
      },
      "FileSystemPath": {
-        "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below.",
+        "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker AI Studio. 
Permitted users can access only this directory and below.", "title": "FileSystemPath", "type": "string" } @@ -245799,13 +245737,13 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.", "title": "CustomImages", "type": "array" }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -245818,7 +245756,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a RSession app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as a RSession app.", "title": "CustomImages", "type": "array" }, @@ -245889,7 +245827,7 @@ "type": "string" }, "SageMakerImageArn": { - "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.", + "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.", "title": "SageMakerImageArn", "type": "string" }, @@ -245934,7 +245872,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, @@ -245982,13 +245920,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. 
Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, "SharingSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.SharingSettings", - "markdownDescription": "Specifies options for sharing Amazon SageMaker Studio notebooks.", + "markdownDescription": "Specifies options for sharing Amazon SageMaker AI Studio notebooks.", "title": "SharingSettings" }, "SpaceStorageSettings": { @@ -247608,7 +247546,7 @@ "title": "Container" }, "ModelName": { - "markdownDescription": "The name of an existing SageMaker model object in your account that you want to deploy with the inference component.", + "markdownDescription": "The name of an existing SageMaker AI model object in your account that you want to deploy with the inference component.", "title": "ModelName", "type": "string" }, @@ -247783,7 +247721,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of all content type headers that Amazon SageMaker will treat as CSV and capture accordingly.", + "markdownDescription": "The list of all content type headers that Amazon SageMaker AI will treat as CSV and capture accordingly.", "title": "CsvContentTypes", "type": "array" }, @@ -247791,7 +247729,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of all content type headers that SageMaker will treat as JSON and capture accordingly.", + "markdownDescription": "The list of all content type headers that SageMaker AI will treat as JSON and capture accordingly.", "title": "JsonContentTypes", "type": "array" } @@ -248712,7 +248650,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -249272,7 +249210,7 @@ "type": "string" }, "ModelId": { - "markdownDescription": "The SageMaker Model ARN or non- SageMaker Model ID.", + "markdownDescription": "The SageMaker AI Model ARN or non- SageMaker AI Model ID.", "title": "ModelId", "type": "string" }, @@ -249458,7 +249396,7 @@ "items": { "type": "string" }, - "markdownDescription": "SageMaker inference image URI.", + "markdownDescription": "SageMaker AI inference image URI.", "title": "ContainerImage", "type": "array" } @@ -249497,7 +249435,7 @@ "type": "array" }, "TrainingArn": { - 
"markdownDescription": "The SageMaker training job Amazon Resource Name (ARN)", + "markdownDescription": "The SageMaker AI training job Amazon Resource Name (ARN)", "title": "TrainingArn", "type": "string" }, @@ -249511,14 +249449,14 @@ }, "TrainingEnvironment": { "$ref": "#/definitions/AWS::SageMaker::ModelCard.TrainingEnvironment", - "markdownDescription": "The SageMaker training job image URI.", + "markdownDescription": "The SageMaker AI training job image URI.", "title": "TrainingEnvironment" }, "TrainingMetrics": { "items": { "$ref": "#/definitions/AWS::SageMaker::ModelCard.TrainingMetric" }, - "markdownDescription": "The SageMaker training job results.", + "markdownDescription": "The SageMaker AI training job results.", "title": "TrainingMetrics", "type": "array" }, @@ -249545,7 +249483,7 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The name of the result from the SageMaker training job.", + "markdownDescription": "The name of the result from the SageMaker AI training job.", "title": "Name", "type": "string" }, @@ -249555,7 +249493,7 @@ "type": "string" }, "Value": { - "markdownDescription": "The value of a result from the SageMaker training job.", + "markdownDescription": "The value of a result from the SageMaker AI training job.", "title": "Value", "type": "number" } @@ -249973,7 +249911,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -251555,7 +251493,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -252088,7 +252026,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::MonitoringSchedule.MonitoringInput" }, - "markdownDescription": "The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker Endpoint.", + "markdownDescription": "The array of inputs for the monitoring job. 
Currently we support monitoring an Amazon SageMaker AI Endpoint.", "title": "MonitoringInputs", "type": "array" }, @@ -252108,7 +252046,7 @@ "title": "NetworkConfig" }, "RoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.", + "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.", "title": "RoleArn", "type": "string" }, @@ -252145,7 +252083,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -252263,7 +252201,7 @@ "type": "string" }, "ScheduleExpression": { - "markdownDescription": "A cron expression that describes details about the monitoring schedule.\n\nThe supported cron expressions are:\n\n- If you want to set the job to start every hour, use the following:\n\n`Hourly: cron(0 * ? * * *)`\n- If you want to start the job daily:\n\n`cron(0 [00-23] ? * * *)`\n- If you want to run the job one time, immediately, use the following keyword:\n\n`NOW`\n\nFor example, the following are valid cron expressions:\n\n- Daily at noon UTC: `cron(0 12 ? * * *)`\n- Daily at midnight UTC: `cron(0 0 ? * * *)`\n\nTo support running every 6, 12 hours, the following are also supported:\n\n`cron(0 [00-23]/[01-24] ? * * *)`\n\nFor example, the following are valid cron expressions:\n\n- Every 12 hours, starting at 5pm UTC: `cron(0 17/12 ? * * *)`\n- Every two hours starting at midnight: `cron(0 0/2 ? * * *)`\n\n> - Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution.\n> - We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker will pick a time for running every day. \n\nYou can also specify the keyword `NOW` to run the monitoring job immediately, one time, without recurring.", + "markdownDescription": "A cron expression that describes details about the monitoring schedule.\n\nThe supported cron expressions are:\n\n- If you want to set the job to start every hour, use the following:\n\n`Hourly: cron(0 * ? * * *)`\n- If you want to start the job daily:\n\n`cron(0 [00-23] ? * * *)`\n- If you want to run the job one time, immediately, use the following keyword:\n\n`NOW`\n\nFor example, the following are valid cron expressions:\n\n- Daily at noon UTC: `cron(0 12 ? * * *)`\n- Daily at midnight UTC: `cron(0 0 ? * * *)`\n\nTo support running every 6, 12 hours, the following are also supported:\n\n`cron(0 [00-23]/[01-24] ? * * *)`\n\nFor example, the following are valid cron expressions:\n\n- Every 12 hours, starting at 5pm UTC: `cron(0 17/12 ? * * *)`\n- Every two hours starting at midnight: `cron(0 0/2 ? * * *)`\n\n> - Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution.\n> - We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker AI will pick a time for running every day. 
\n\nYou can also specify the keyword `NOW` to run the monitoring job immediately, one time, without recurring.", "title": "ScheduleExpression", "type": "string" } @@ -252371,17 +252309,17 @@ "items": { "type": "string" }, - "markdownDescription": "An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see [Associating Git Repositories with SageMaker Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", + "markdownDescription": "An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see [Associating Git Repositories with SageMaker AI Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", "title": "AdditionalCodeRepositories", "type": "array" }, "DefaultCodeRepository": { - "markdownDescription": "The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see [Associating Git Repositories with SageMaker Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", + "markdownDescription": "The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see [Associating Git Repositories with SageMaker AI Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", "title": "DefaultCodeRepository", "type": "string" }, "DirectInternetAccess": { - "markdownDescription": "Sets whether SageMaker provides internet access to the notebook instance. If you set this to `Disabled` this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC.\n\nFor more information, see [Notebook Instances Are Internet-Enabled by Default](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access) . 
You can set the value of this parameter to `Disabled` only if you set a value for the `SubnetId` parameter.",
+        "markdownDescription": "Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to `Disabled` , this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC.\n\nFor more information, see [Notebook Instances Are Internet-Enabled by Default](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access) . You can set the value of this parameter to `Disabled` only if you set a value for the `SubnetId` parameter.",
        "title": "DirectInternetAccess",
        "type": "string"
      },
@@ -252396,7 +252334,7 @@
        "type": "string"
      },
      "KmsKeyId": {
-        "markdownDescription": "The Amazon Resource Name (ARN) of a AWS Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see [Enabling and Disabling Keys](https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the *AWS Key Management Service Developer Guide* .",
+        "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see [Enabling and Disabling Keys](https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the *AWS Key Management Service Developer Guide* .",
        "title": "KmsKeyId",
        "type": "string"
      },
@@ -252416,7 +252354,7 @@
        "type": "string"
      },
      "RoleArn": {
-        "markdownDescription": "When you send any requests to AWS resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see [SageMaker Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) .\n\n> To be able to pass this role to SageMaker, the caller of this API must have the `iam:PassRole` permission.",
+        "markdownDescription": "When you send any requests to AWS resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. You must grant this role the necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see [SageMaker AI Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) .\n\n> To be able to pass this role to SageMaker AI, the caller of this API must have the `iam:PassRole` permission.",
        "title": "RoleArn",
        "type": "string"
      },
@@ -253088,7 +253026,7 @@
      "properties": {
        "DefaultResourceSpec": {
          "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec",
-          "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required.",
+          "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the JupyterServer app. 
If you use the `LifecycleConfigArns` parameter, then this parameter is also required.", "title": "DefaultResourceSpec" } }, @@ -253101,13 +253039,13 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Space.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.", "title": "CustomImages", "type": "array" }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -253136,7 +253074,7 @@ "type": "string" }, "SageMakerImageArn": { - "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.", + "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.", "title": "SageMakerImageArn", "type": "string" }, @@ -253182,7 +253120,7 @@ "additionalProperties": false, "properties": { "AppType": { - "markdownDescription": "The type of app created within the space.", + "markdownDescription": "The type of app created within the space.\n\nIf using the [UpdateSpace](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_UpdateSpace.html) API, you can't change the app type of your space by specifying a different value for this field.", "title": "AppType", "type": "string" }, @@ -253195,7 +253133,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Space.CustomFileSystem" }, - "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.", + "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.", "title": "CustomFileSystems", "type": "array" }, @@ -253480,7 +253418,7 @@ "type": "string" }, "FileSystemPath": { - "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below.", + "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker AI Studio. 
Permitted users can access only this directory and below.", "title": "FileSystemPath", "type": "string" } @@ -253543,13 +253481,13 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.", "title": "CustomImages", "type": "array" }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -253580,7 +253518,7 @@ "type": "string" }, "SageMakerImageArn": { - "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.", + "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.", "title": "SageMakerImageArn", "type": "string" }, @@ -253625,7 +253563,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, @@ -253668,13 +253606,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. 
SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, "SharingSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.SharingSettings", - "markdownDescription": "Specifies options for sharing Amazon SageMaker Studio notebooks.", + "markdownDescription": "Specifies options for sharing Amazon SageMaker AI Studio notebooks.", "title": "SharingSettings" }, "SpaceStorageSettings": { @@ -255165,7 +255103,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "CreatedAt", "type": "array" }, @@ -255189,7 +255127,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "FirstObservedAt", "type": "array" }, @@ -255213,7 +255151,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "LastObservedAt", "type": "array" }, @@ -255229,7 +255167,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "The timestamp of when the note was updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "The timestamp of when the note was updated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "NoteUpdatedAt", "type": "array" }, @@ -255365,7 +255303,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "UpdatedAt", "type": "array" }, @@ -255405,12 +255343,12 @@ "title": "DateRange" }, "End": { - "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "End", "type": "string" }, "Start": { - "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "Start", "type": "string" } @@ -255682,7 +255620,7 @@ "additionalProperties": false, "properties": { "AutoEnableControls": { - "markdownDescription": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .", + "markdownDescription": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .\n\nWhen you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of `DISABLED` . It can take up to several days for Security Hub to process the control release and designate the control as `ENABLED` in your account. 
During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have `AutoEnableControls` set to `true` .", "title": "AutoEnableControls", "type": "boolean" }, @@ -255888,7 +255826,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "CreatedAt", "type": "array" }, @@ -255968,7 +255906,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "FirstObservedAt", "type": "array" }, @@ -255992,7 +255930,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "LastObservedAt", "type": "array" }, @@ -256144,7 +256082,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies when the process was launched.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that identifies when the process was launched.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ProcessLaunchedAt", "type": "array" }, @@ -256184,7 +256122,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies when the process was terminated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that identifies when the process was terminated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ProcessTerminatedAt", "type": "array" }, @@ -256408,7 +256346,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies when the container was started.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that identifies when the container was started.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ResourceContainerLaunchedAt", "type": "array" }, @@ -256504,7 +256442,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies the last observation of a threat intelligence indicator.", + "markdownDescription": "A timestamp that identifies the last observation of a threat intelligence indicator.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ThreatIntelIndicatorLastObservedAt", "type": "array" }, @@ -256560,7 +256498,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "UpdatedAt", "type": "array" }, @@ -256638,12 +256576,12 @@ "title": "DateRange" }, "End": { - "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "End", "type": "string" }, "Start": { - "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "Start", "type": "string" } @@ -261449,7 +261387,7 @@ "type": "string" }, "FailureRetentionPeriod": { - "markdownDescription": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.", + "markdownDescription": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.\n\nThis setting affects the range of information returned by [GetCanaryRuns](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanaryRuns.html) , as well as the range of information displayed in the Synthetics console.", "title": "FailureRetentionPeriod", "type": "number" }, @@ -261479,7 +261417,7 @@ "type": "boolean" }, "SuccessRetentionPeriod": { - "markdownDescription": "The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.", + "markdownDescription": "The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. 
The valid range is 1 to 455 days.\n\nThis setting affects the range of information returned by [GetCanaryRuns](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanaryRuns.html) , as well as the range of information displayed in the Synthetics console.", "title": "SuccessRetentionPeriod", "type": "number" }, @@ -264668,7 +264606,7 @@ "type": "string" }, "ResourceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network or service.", + "markdownDescription": "The ID or ARN of the service network or service.", "title": "ResourceIdentifier", "type": "string" }, @@ -264748,7 +264686,7 @@ "type": "object" }, "ResourceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created.", + "markdownDescription": "The ID or ARN of the service network or service for which the policy is created.", "title": "ResourceIdentifier", "type": "string" } @@ -264836,7 +264774,7 @@ "type": "string" }, "ServiceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service.", + "markdownDescription": "The ID or ARN of the service.", "title": "ServiceIdentifier", "type": "string" }, @@ -265056,7 +264994,7 @@ "title": "Action" }, "ListenerIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the listener.", + "markdownDescription": "The ID or ARN of the listener.", "title": "ListenerIdentifier", "type": "string" }, @@ -265076,7 +265014,7 @@ "type": "number" }, "ServiceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service.", + "markdownDescription": "The ID or ARN of the service.", "title": "ServiceIdentifier", "type": "string" }, @@ -265526,12 +265464,12 @@ "title": "DnsEntry" }, "ServiceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service.", + "markdownDescription": "The ID or ARN of the service.", "title": "ServiceIdentifier", "type": "string" }, "ServiceNetworkIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts.", + "markdownDescription": "The ID or ARN of the service network. You must use an ARN if the resources are in different accounts.", "title": "ServiceNetworkIdentifier", "type": "string" }, @@ -265626,7 +265564,7 @@ "type": "array" }, "ServiceNetworkIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN when the resources specified in the operation are in different accounts.", + "markdownDescription": "The ID or ARN of the service network. You must use an ARN if the resources are in different accounts.", "title": "ServiceNetworkIdentifier", "type": "string" }, @@ -269089,7 +269027,7 @@ "title": "ForwardedIPConfig" }, "Limit": { - "markdownDescription": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. 
If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", + "markdownDescription": "The limit on requests during the specified evaluation window for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "title": "Limit", "type": "number" }, @@ -269363,7 +269301,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.Label" }, - "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", + "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\n> Any rule that isn't a rule group reference statement or managed rule group statement can add labels to matching web requests. \n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", "title": "RuleLabels", "type": "array" }, @@ -270591,7 +270529,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RuleActionOverride" }, - "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. 
You specify one override for each rule whose action you want to change.\n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", + "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\n> Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, AWS WAF doesn't return an error and doesn't apply the override setting. \n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", "title": "RuleActionOverrides", "type": "array" }, @@ -270691,7 +270629,7 @@ "title": "ForwardedIPConfig" }, "Limit": { - "markdownDescription": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", + "markdownDescription": "The limit on requests during the specified evaluation window for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "title": "Limit", "type": "number" }, @@ -271197,7 +271135,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::WebACL.Label" }, - "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", + "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. 
A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\n> Any rule that isn't a rule group reference statement or managed rule group statement can add labels to matching web requests. \n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", "title": "RuleLabels", "type": "array" }, @@ -271291,7 +271229,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RuleActionOverride" }, - "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", + "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\n> Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, AWS WAF doesn't return an error and doesn't apply the override setting. \n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", "title": "RuleActionOverrides", "type": "array" } diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 0d3db6bf4..b0d817c08 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -1330,6 +1330,7 @@ }, "AWS::AppConfig::ConfigurationProfile": { "ApplicationId": "The application ID.", + "DeletionProtectionCheck": "", "Description": "A description of the configuration profile.", "KmsKeyIdentifier": "The AWS Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.", "LocationUri": "A URI to locate the configuration. You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . 
Here is an example: `s3://amzn-s3-demo-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", @@ -1383,6 +1384,7 @@ }, "AWS::AppConfig::Environment": { "ApplicationId": "The application ID.", + "DeletionProtectionCheck": "", "Description": "A description of the environment.", "Monitors": "Amazon CloudWatch alarms to monitor during the deployment process.", "Name": "A name for the environment.", @@ -4815,10 +4817,6 @@ "Timeout": "The timeout time for jobs that are submitted with this job definition. After the amount of time you specify passes, AWS Batch terminates your jobs if they aren't finished.", "Type": "The type of job definition. For more information about multi-node parallel jobs, see [Creating a multi-node parallel job definition](https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in the *AWS Batch User Guide* .\n\n- If the value is `container` , then one of the following is required: `containerProperties` , `ecsProperties` , or `eksProperties` .\n- If the value is `multinode` , then `nodeProperties` is required.\n\n> If the job is run on Fargate resources, then `multinode` isn't supported." }, - "AWS::Batch::JobDefinition AuthorizationConfig": { - "AccessPointId": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", - "Iam": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified." - }, "AWS::Batch::JobDefinition ContainerProperties": { "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) .", "Environment": "The environment variables to pass to a container. 
This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with \" `AWS_BATCH` \". This naming convention is reserved for variables that AWS Batch sets.", @@ -4826,7 +4824,6 @@ "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", "FargatePlatformConfiguration": "The platform configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", "Image": "Required. The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with `*repository-url* / *image* : *tag*` . It can be 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources. \n\n- Images in Amazon ECR Public repositories use the full `registry/repository[:tag]` or `registry/repository[@digest]` naming conventions. For example, `public.ecr.aws/ *registry_alias* / *my-web-app* : *latest*` .\n- Images in Amazon ECR repositories use the full registry and repository URI (for example, `123456789012.dkr.ecr..amazonaws.com/` ).\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", - "InstanceType": "The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.\n\n> This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.", "JobRoleArn": "The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. 
For more information, see [IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .", "LinuxParameters": "Linux-specific modifications that are applied to the container, such as details for device mappings.", "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html) data type). \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", @@ -4849,6 +4846,17 @@ "HostPath": "The path for the device on the host container instance.", "Permissions": "The explicit permissions to provide to the container for the device. By default, the container has permissions for `read` , `write` , and `mknod` for the device." }, + "AWS::Batch::JobDefinition EFSAuthorizationConfig": { + "AccessPointId": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", + "Iam": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . 
If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified." + }, + "AWS::Batch::JobDefinition EFSVolumeConfiguration": { + "AuthorizationConfig": "The authorization configuration details for the Amazon EFS file system.", + "FileSystemId": "The Amazon EFS file system ID to use.", + "RootDirectory": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", + "TransitEncryption": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", + "TransitEncryptionPort": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* ." + }, "AWS::Batch::JobDefinition EcsProperties": { "TaskProperties": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one task element. However, the task element can run up to 10 containers." }, @@ -4864,13 +4872,6 @@ "TaskRoleArn": "The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.\n\n> This is object is comparable to [ContainerProperties:jobRoleArn](https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerProperties.html) .", "Volumes": "A list of volumes that are associated with the job." }, - "AWS::Batch::JobDefinition EfsVolumeConfiguration": { - "AuthorizationConfig": "The authorization configuration details for the Amazon EFS file system.", - "FileSystemId": "The Amazon EFS file system ID to use.", - "RootDirectory": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", - "TransitEncryption": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. 
Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", - "TransitEncryptionPort": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* ." - }, "AWS::Batch::JobDefinition EksContainer": { "Args": "An array of arguments to the entrypoint. If this isn't specified, the `CMD` of the container image is used. This corresponds to the `args` member in the [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) portion of the [Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) in Kubernetes. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` , and the resulting string isn't expanded. For example, `$$(VAR_NAME)` is passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) and [Define a command and arguments for a pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) in the *Kubernetes documentation* .", "Command": "The entrypoint for the container. This isn't run within a shell. If this isn't specified, the `ENTRYPOINT` of the container image is used. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` and the resulting string isn't expanded. For example, `$$(VAR_NAME)` will be passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. The entrypoint can't be updated. For more information, see [ENTRYPOINT](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) in the *Dockerfile reference* and [Define a command and arguments for a container](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) and [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) in the *Kubernetes documentation* .", @@ -4910,6 +4911,20 @@ "AWS::Batch::JobDefinition EksHostPath": { "Path": "The path of the file or directory on the host to mount into containers on the pod." 
}, + "AWS::Batch::JobDefinition EksMetadata": { + "Labels": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object." + }, + "AWS::Batch::JobDefinition EksPodProperties": { + "Containers": "The properties of the container that's used on the Amazon EKS pod.\n\n> This object is limited to 10 elements.", + "DnsPolicy": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", + "HostNetwork": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "ImagePullSecrets": "References a Kubernetes secret resource. It holds a list of secrets. These secrets help to gain access to pull an images from a private registry.\n\n`ImagePullSecret$name` is required when this object is used.", + "InitContainers": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements.", + "Metadata": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", + "ServiceAccountName": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "ShareProcessNamespace": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. 
For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", + "Volumes": "Specifies the volumes for a job definition that uses Amazon EKS resources." + }, "AWS::Batch::JobDefinition EksProperties": { "PodProperties": "The properties for the Kubernetes pod resources of a job." }, @@ -4939,9 +4954,15 @@ "AWS::Batch::JobDefinition FargatePlatformConfiguration": { "PlatformVersion": "The AWS Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the AWS Fargate platform for compute resources. For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* ." }, + "AWS::Batch::JobDefinition Host": { + "SourcePath": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. Don't provide this for these jobs." + }, "AWS::Batch::JobDefinition ImagePullSecret": { "Name": "Provides a unique identifier for the `ImagePullSecret` . This object is required when `EksPodProperties$imagePullSecrets` is used." }, + "AWS::Batch::JobDefinition JobTimeout": { + "AttemptDurationSeconds": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes." + }, "AWS::Batch::JobDefinition LinuxParameters": { "Devices": "Any of the host devices to expose to the container. This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't provide it for these jobs.", "InitProcessEnabled": "If true, run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`", @@ -4955,14 +4976,45 @@ "Options": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`", "SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html) in the *AWS Batch User Guide* ." }, - "AWS::Batch::JobDefinition Metadata": { - "Labels": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object." - }, - "AWS::Batch::JobDefinition MountPoints": { + "AWS::Batch::JobDefinition MountPoint": { "ContainerPath": "The path on the container where the host volume is mounted.", "ReadOnly": "If this value is `true` , the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is `false` .", "SourceVolume": "The name of the volume to mount." }, + "AWS::Batch::JobDefinition MultiNodeContainerProperties": { + "Command": "", + "Environment": "", + "EphemeralStorage": "", + "ExecutionRoleArn": "", + "Image": "", + "InstanceType": "", + "JobRoleArn": "", + "LinuxParameters": "", + "LogConfiguration": "", + "Memory": "", + "MountPoints": "", + "Privileged": "", + "ReadonlyRootFilesystem": "", + "RepositoryCredentials": "", + "ResourceRequirements": "", + "RuntimePlatform": "", + "Secrets": "", + "Ulimits": "", + "User": "", + "Vcpus": "", + "Volumes": "" + }, + "AWS::Batch::JobDefinition MultiNodeEcsProperties": { + "TaskProperties": "" + }, + "AWS::Batch::JobDefinition MultiNodeEcsTaskProperties": { + "Containers": "", + "ExecutionRoleArn": "", + "IpcMode": "", + "PidMode": "", + "TaskRoleArn": "", + "Volumes": "" + }, "AWS::Batch::JobDefinition NetworkConfiguration": { "AssignPublicIp": "Indicates whether the job has a public IP address. For a job that's running on Fargate resources in a private subnet to send outbound traffic to the internet (for example, to pull container images), the private subnet requires a NAT gateway be attached to route requests to the internet. For more information, see [Amazon ECS task networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* . The default value is \" `DISABLED` \"." }, @@ -4978,17 +5030,6 @@ "InstanceTypes": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", "TargetNodes": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. 
If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties." }, - "AWS::Batch::JobDefinition PodProperties": { - "Containers": "The properties of the container that's used on the Amazon EKS pod.\n\n> This object is limited to 10 elements.", - "DnsPolicy": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", - "HostNetwork": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", - "ImagePullSecrets": "", - "InitContainers": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements.", - "Metadata": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", - "ServiceAccountName": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", - "ShareProcessNamespace": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", - "Volumes": "Specifies the volumes for a job definition that uses Amazon EKS resources." 
- }, "AWS::Batch::JobDefinition RepositoryCredentials": { "CredentialsParameter": "The Amazon Resource Name (ARN) of the secret containing the private repository credentials." }, @@ -5030,9 +5071,6 @@ "Ulimits": "A list of `ulimits` to set in the container. If a `ulimit` value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nAmazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The `nofile` resource limit sets a restriction on the number of open files that a container can use. The default `nofile` soft limit is `1024` and the default hard limit is `65535` .\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", "User": "The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the `root user (UID 0)` . We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gi`\n- `uid:group`\n\n> This parameter is not supported for Windows containers." }, - "AWS::Batch::JobDefinition Timeout": { - "AttemptDurationSeconds": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes." - }, "AWS::Batch::JobDefinition Tmpfs": { "ContainerPath": "The absolute file path in the container where the `tmpfs` volume is mounted.", "MountOptions": "The list of `tmpfs` volume mount options.\n\nValid values: \" `defaults` \" | \" `ro` \" | \" `rw` \" | \" `suid` \" | \" `nosuid` \" | \" `dev` \" | \" `nodev` \" | \" `exec` \" | \" `noexec` \" | \" `sync` \" | \" `async` \" | \" `dirsync` \" | \" `remount` \" | \" `mand` \" | \" `nomand` \" | \" `atime` \" | \" `noatime` \" | \" `diratime` \" | \" `nodiratime` \" | \" `bind` \" | \" `rbind\" | \"unbindable\" | \"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime` \" | \" `norelatime` \" | \" `strictatime` \" | \" `nostrictatime` \" | \" `mode` \" | \" `uid` \" | \" `gid` \" | \" `nr_inodes` \" | \" `nr_blocks` \" | \" `mpol` \"", @@ -5043,14 +5081,11 @@ "Name": "The `type` of the `ulimit` . 
Valid values are: `core` | `cpu` | `data` | `fsize` | `locks` | `memlock` | `msgqueue` | `nice` | `nofile` | `nproc` | `rss` | `rtprio` | `rttime` | `sigpending` | `stack` .", "SoftLimit": "The soft limit for the `ulimit` type." }, - "AWS::Batch::JobDefinition Volumes": { - "EfsVolumeConfiguration": "This is used when you're using an Amazon Elastic File System file system for job storage. For more information, see [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the *AWS Batch User Guide* .", + "AWS::Batch::JobDefinition Volume": { + "EfsVolumeConfiguration": "This parameter is specified when you're using an Amazon Elastic File System file system for job storage. Jobs that are running on Fargate resources must specify a `platformVersion` of at least `1.4.0` .", "Host": "The contents of the `host` parameter determine whether your data volume persists on the host container instance and where it's stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", "Name": "The name of the volume. It can be up to 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This name is referenced in the `sourceVolume` parameter of container definition `mountPoints` ." }, - "AWS::Batch::JobDefinition VolumesHost": { - "SourcePath": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. Don't provide this for these jobs." - }, "AWS::Batch::JobQueue": { "ComputeEnvironmentOrder": "The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment runs a specific job. Compute environments must be in the `VALID` state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. All of the compute environments must be either EC2 ( `EC2` or `SPOT` ) or Fargate ( `FARGATE` or `FARGATE_SPOT` ); EC2 and Fargate compute environments can't be mixed.\n\n> All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.", "JobQueueName": "The name of the job queue. It can be up to 128 letters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).", @@ -5127,7 +5162,8 @@ "AWS::Bedrock::Agent Function": { "Description": "A description of the function and its purpose.", "Name": "A name for the function.", - "Parameters": "The parameters that the agent elicits from the user to fulfill the function." 
+ "Parameters": "The parameters that the agent elicits from the user to fulfill the function.", + "RequireConfirmation": "Contains information if user confirmation is required to invoke the function." }, "AWS::Bedrock::Agent FunctionSchema": { "Functions": "A list of functions that each define an action in the action group." @@ -5192,8 +5228,8 @@ "CopyFrom": "The ARN of the model or system-defined inference profile that is the source for the inference profile." }, "AWS::Bedrock::ApplicationInferenceProfile Tag": { - "Key": "Key for the tag.", - "Value": "Value for the tag." + "Key": "The tag's key.", + "Value": "The tag's value." }, "AWS::Bedrock::DataSource": { "DataDeletionPolicy": "The data deletion policy for the data source.", @@ -5204,8 +5240,12 @@ "ServerSideEncryptionConfiguration": "Contains details about the configuration of the server-side encryption.", "VectorIngestionConfiguration": "Contains details about how to ingest the documents in the data source." }, + "AWS::Bedrock::DataSource BedrockDataAutomationConfiguration": { + "ParsingModality": "Specifies whether to enable parsing of multimodal data, including both text and/or images." + }, "AWS::Bedrock::DataSource BedrockFoundationModelConfiguration": { - "ModelArn": "The ARN of the foundation model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) .", + "ModelArn": "The ARN of the foundation model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to use for parsing.", + "ParsingModality": "Specifies whether to enable parsing of multimodal data, including both text and/or images.", "ParsingPrompt": "Instructions for interpreting the contents of a document." }, "AWS::Bedrock::DataSource ChunkingConfiguration": { @@ -5258,7 +5298,8 @@ "S3Location": "An S3 bucket path." }, "AWS::Bedrock::DataSource ParsingConfiguration": { - "BedrockFoundationModelConfiguration": "Settings for a foundation model used to parse documents for a data source.", + "BedrockDataAutomationConfiguration": "If you specify `BEDROCK_DATA_AUTOMATION` as the parsing strategy for ingesting your data source, use this object to modify configurations for using the Amazon Bedrock Data Automation parser.", + "BedrockFoundationModelConfiguration": "If you specify `BEDROCK_FOUNDATION_MODEL` as the parsing strategy for ingesting your data source, use this object to modify configurations for using a foundation model to parse documents.", "ParsingStrategy": "The parsing strategy for the data source." }, "AWS::Bedrock::DataSource ParsingPrompt": { @@ -5278,7 +5319,7 @@ "InclusionPrefixes": "A list of S3 prefixes to include certain files or content. For more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) ." }, "AWS::Bedrock::DataSource S3Location": { - "URI": "The location's URI. For example, `s3://my-bucket/chunk-processor/` ." + "URI": "An object URI starting with `s3://` ." }, "AWS::Bedrock::DataSource SalesforceCrawlerConfiguration": { "FilterConfiguration": "The configuration of filtering the Salesforce content. For example, configuring regular expression patterns to include or exclude certain content." @@ -5334,7 +5375,7 @@ "AWS::Bedrock::DataSource VectorIngestionConfiguration": { "ChunkingConfiguration": "Details about how to chunk the documents in the data source. 
A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.", "CustomTransformationConfiguration": "A custom document transformer for parsed data source documents.", - "ParsingConfiguration": "A custom parser for data source documents." + "ParsingConfiguration": "Configurations for a parser to use for parsing documents in your data source. If you exclude this field, the default parser will be used." }, "AWS::Bedrock::DataSource WebCrawlerConfiguration": { "CrawlerLimits": "The configuration of crawl limits for the web URLs.", @@ -5683,7 +5724,7 @@ }, "AWS::Bedrock::Guardrail PiiEntityConfig": { "Action": "Configure guardrail action when the PII entity is detected.", - "Type": "Configure guardrail type when the PII entity is detected.\n\nThe following PIIs are used to block or mask sensitive information:\n\n- *General*\n\n- *ADDRESS*\n\nA physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.\n- *AGE*\n\nAn individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guarrails recognizes \"40 years\" as an age.\n- *NAME*\n\nAn individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. guardrails doesn't apply this entity type to names that are part of organizations or addresses. For example, guardrails recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n- *EMAIL*\n\nAn email address, such as *marymajor@email.com* .\n- *PHONE*\n\nA phone number. This entity type also includes fax and pager numbers.\n- *USERNAME*\n\nA user name that identifies an account, such as a login name, screen name, nick name, or handle.\n- *PASSWORD*\n\nAn alphanumeric string that is used as a password, such as \"* *very20special#pass** \".\n- *DRIVER_ID*\n\nThe number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.\n- *LICENSE_PLATE*\n\nA license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.\n- *VEHICLE_IDENTIFICATION_NUMBER*\n\nA Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the *ISO 3779* specification. Each country has specific codes and formats for VINs.\n- *Finance*\n\n- *REDIT_DEBIT_CARD_CVV*\n\nA three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.\n- *CREDIT_DEBIT_CARD_EXPIRY*\n\nThe expiration date for a credit or debit card. This number is usually four digits long and is often formatted as *month/year* or *MM/YY* . Guardrails recognizes expiration dates such as *01/21* , *01/2021* , and *Jan 2021* .\n- *CREDIT_DEBIT_CARD_NUMBER*\n\nThe number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. 
However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.\n- *PIN*\n\nA four-digit personal identification number (PIN) with which you can access your bank account.\n- *INTERNATIONAL_BANK_ACCOUNT_NUMBER*\n\nAn International Bank Account Number has specific formats in each country. For more information, see [www.iban.com/structure](https://docs.aws.amazon.com/https://www.iban.com/structure) .\n- *SWIFT_CODE*\n\nA SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.\n\nSWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.\n- *IT*\n\n- *IP_ADDRESS*\n\nAn IPv4 address, such as *198.51.100.0* .\n- *MAC_ADDRESS*\n\nA *media access control* (MAC) address is a unique identifier assigned to a network interface controller (NIC).\n- *URL*\n\nA web address, such as *www.example.com* .\n- *AWS_ACCESS_KEY*\n\nA unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *AWS_SECRET_KEY*\n\nA unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *USA specific*\n\n- *US_BANK_ACCOUNT_NUMBER*\n\nA US bank account number, which is typically 10 to 12 digits long.\n- *US_BANK_ROUTING_NUMBER*\n\nA US bank account routing number. These are typically nine digits long,\n- *US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER*\n\nA US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits.\n- *US_PASSPORT_NUMBER*\n\nA US passport number. Passport numbers range from six to nine alphanumeric characters.\n- *US_SOCIAL_SECURITY_NUMBER*\n\nA US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.\n- *Canada specific*\n\n- *CA_HEALTH_NUMBER*\n\nA Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.\n- *CA_SOCIAL_INSURANCE_NUMBER*\n\nA Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.\n\nThe SIN is formatted as three groups of three digits, such as *123-456-789* . A SIN can be validated through a simple check-digit process called the [Luhn algorithm](https://docs.aws.amazon.com/https://www.wikipedia.org/wiki/Luhn_algorithm) .\n- *UK Specific*\n\n- *UK_NATIONAL_HEALTH_SERVICE_NUMBER*\n\nA UK National Health Service Number is a 10-17 digit number, such as *485 777 3456* . The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.\n- *UK_NATIONAL_INSURANCE_NUMBER*\n\nA UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.\n\nThe number is nine digits long and starts with two letters, followed by six numbers and one letter. 
A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits.\n- *UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER*\n\nA UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.\n- *Custom*\n\n- *Regex filter* - You can use a regular expressions to define patterns for a guardrail to recognize and act upon such as serial number, booking ID etc.." + "Type": "Configure guardrail type when the PII entity is detected.\n\nThe following PIIs are used to block or mask sensitive information:\n\n- *General*\n\n- *ADDRESS*\n\nA physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.\n- *AGE*\n\nAn individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guardrails recognizes \"40 years\" as an age.\n- *NAME*\n\nAn individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. guardrails doesn't apply this entity type to names that are part of organizations or addresses. For example, guardrails recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n- *EMAIL*\n\nAn email address, such as *marymajor@email.com* .\n- *PHONE*\n\nA phone number. This entity type also includes fax and pager numbers.\n- *USERNAME*\n\nA user name that identifies an account, such as a login name, screen name, nick name, or handle.\n- *PASSWORD*\n\nAn alphanumeric string that is used as a password, such as \"* *very20special#pass** \".\n- *DRIVER_ID*\n\nThe number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.\n- *LICENSE_PLATE*\n\nA license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.\n- *VEHICLE_IDENTIFICATION_NUMBER*\n\nA Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the *ISO 3779* specification. Each country has specific codes and formats for VINs.\n- *Finance*\n\n- *CREDIT_DEBIT_CARD_CVV*\n\nA three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.\n- *CREDIT_DEBIT_CARD_EXPIRY*\n\nThe expiration date for a credit or debit card. This number is usually four digits long and is often formatted as *month/year* or *MM/YY* . Guardrails recognizes expiration dates such as *01/21* , *01/2021* , and *Jan 2021* .\n- *CREDIT_DEBIT_CARD_NUMBER*\n\nThe number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.\n- *PIN*\n\nA four-digit personal identification number (PIN) with which you can access your bank account.\n- *INTERNATIONAL_BANK_ACCOUNT_NUMBER*\n\nAn International Bank Account Number has specific formats in each country. 
For more information, see [www.iban.com/structure](https://docs.aws.amazon.com/https://www.iban.com/structure) .\n- *SWIFT_CODE*\n\nA SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.\n\nSWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.\n- *IT*\n\n- *IP_ADDRESS*\n\nAn IPv4 address, such as *198.51.100.0* .\n- *MAC_ADDRESS*\n\nA *media access control* (MAC) address is a unique identifier assigned to a network interface controller (NIC).\n- *URL*\n\nA web address, such as *www.example.com* .\n- *AWS_ACCESS_KEY*\n\nA unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *AWS_SECRET_KEY*\n\nA unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *USA specific*\n\n- *US_BANK_ACCOUNT_NUMBER*\n\nA US bank account number, which is typically 10 to 12 digits long.\n- *US_BANK_ROUTING_NUMBER*\n\nA US bank account routing number. These are typically nine digits long.\n- *US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER*\n\nA US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contains a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and fourth digits.\n- *US_PASSPORT_NUMBER*\n\nA US passport number. Passport numbers range from six to nine alphanumeric characters.\n- *US_SOCIAL_SECURITY_NUMBER*\n\nA US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.\n- *Canada specific*\n\n- *CA_HEALTH_NUMBER*\n\nA Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.\n- *CA_SOCIAL_INSURANCE_NUMBER*\n\nA Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.\n\nThe SIN is formatted as three groups of three digits, such as *123-456-789* . A SIN can be validated through a simple check-digit process called the [Luhn algorithm](https://docs.aws.amazon.com/https://www.wikipedia.org/wiki/Luhn_algorithm) .\n- *UK Specific*\n\n- *UK_NATIONAL_HEALTH_SERVICE_NUMBER*\n\nA UK National Health Service Number is a 10-17 digit number, such as *485 777 3456* . The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.\n- *UK_NATIONAL_INSURANCE_NUMBER*\n\nA UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.\n\nThe number is nine digits long and starts with two letters, followed by six numbers and one letter.
A NINO can be formatted with a space or a dash after the two letters and after the second, fourth, and sixth digits.\n- *UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER*\n\nA UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.\n- *Custom*\n\n- *Regex filter* - You can use regular expressions to define patterns for a guardrail to recognize and act upon, such as a serial number or booking ID." }, "AWS::Bedrock::Guardrail RegexConfig": { "Action": "The guardrail action to configure when matching regular expression is detected.", @@ -5696,8 +5737,8 @@ "RegexesConfig": "A list of regular expressions to configure to the guardrail." }, "AWS::Bedrock::Guardrail Tag": { - "Key": "Key for the tag.", - "Value": "Value for the tag." + "Key": "The tag's key.", + "Value": "The tag's value." }, "AWS::Bedrock::Guardrail TopicConfig": { "Definition": "A definition of the topic to deny.", @@ -5733,7 +5774,11 @@ "AWS::Bedrock::KnowledgeBase EmbeddingModelConfiguration": { "BedrockEmbeddingModelConfiguration": "The vector configuration details on the Bedrock embeddings model." }, + "AWS::Bedrock::KnowledgeBase KendraKnowledgeBaseConfiguration": { + "KendraIndexArn": "The ARN of the Amazon Kendra index." + }, "AWS::Bedrock::KnowledgeBase KnowledgeBaseConfiguration": { + "KendraKnowledgeBaseConfiguration": "Settings for an Amazon Kendra knowledge base.", "Type": "The type of data that the data source is converted into for the knowledge base.", "VectorKnowledgeBaseConfiguration": "Contains details about the model that's used to convert the data source into vector embeddings." }, @@ -5784,6 +5829,9 @@ "TextField": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." }, + "AWS::Bedrock::KnowledgeBase S3Location": { + "URI": "An object URI starting with `s3://` ." + }, "AWS::Bedrock::KnowledgeBase StorageConfiguration": { "MongoDbAtlasConfiguration": "Contains the storage configuration of the knowledge base in MongoDB Atlas.", "OpensearchServerlessConfiguration": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.", @@ -5791,9 +5839,17 @@ "RdsConfiguration": "Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see [Create a vector index in Amazon RDS](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html) .", "Type": "The vector store service in which the knowledge base is stored." }, + "AWS::Bedrock::KnowledgeBase SupplementalDataStorageConfiguration": { + "SupplementalDataStorageLocations": "" + }, + "AWS::Bedrock::KnowledgeBase SupplementalDataStorageLocation": { + "S3Location": "Contains information about the Amazon S3 location for the extracted images.", + "SupplementalDataStorageLocationType": "" + }, "AWS::Bedrock::KnowledgeBase VectorKnowledgeBaseConfiguration": { "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model or inference profile used to create vector embeddings for the knowledge base.", - "EmbeddingModelConfiguration": "The embeddings model configuration details for the vector model used in Knowledge Base."
+ "EmbeddingModelConfiguration": "The embeddings model configuration details for the vector model used in Knowledge Base.", + "SupplementalDataStorageConfiguration": "If you include multimodal data from your data source, use this object to specify configurations for the storage location of the images extracted from your documents. These images can be retrieved and returned to the end user. They can also be used in generation when using [RetrieveAndGenerate](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_RetrieveAndGenerate.html) ." }, "AWS::Bedrock::Prompt": { "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the prompt is encrypted with.", @@ -5803,6 +5859,25 @@ "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", "Variants": "A list of objects, each containing details about a variant of the prompt." }, + "AWS::Bedrock::Prompt ChatPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Messages": "Contains messages in the chat for the prompt.", + "System": "Contains system prompts to provide context to the model or to describe how it should behave.", + "ToolConfiguration": "Configuration information for the tools that the model can use when generating a response." + }, + "AWS::Bedrock::Prompt ContentBlock": { + "Text": "Text to include in the message." + }, + "AWS::Bedrock::Prompt Message": { + "Content": "The message content. Note the following restrictions:\n\n- You can include up to 20 images. Each image's size, height, and width must be no more than 3.75 MB, 8000 px, and 8000 px, respectively.\n- You can include up to five documents. Each document's size must be no more than 4.5 MB.\n- If you include a `ContentBlock` with a `document` field in the array, you must also include a `ContentBlock` with a `text` field.\n- You can only include images and documents if the `role` is `user` .", + "Role": "The role that the message plays in the message." + }, + "AWS::Bedrock::Prompt PromptAgentResource": { + "AgentIdentifier": "The ARN of the agent with which to use the prompt." + }, + "AWS::Bedrock::Prompt PromptGenAiResource": { + "Agent": "Specifies an Amazon Bedrock agent with which to use the prompt." + }, "AWS::Bedrock::Prompt PromptInferenceConfiguration": { "Text": "Contains inference configurations for a text prompt." }, @@ -5816,15 +5891,23 @@ "TopP": "The percentage of most-likely candidates that the model considers for the next token." }, "AWS::Bedrock::Prompt PromptTemplateConfiguration": { + "Chat": "Contains configurations to use the prompt in a conversational format.", "Text": "Contains configurations for the text in a message for a prompt." 
}, "AWS::Bedrock::Prompt PromptVariant": { + "GenAiResource": "Specifies a generative AI resource with which to use the prompt.", "InferenceConfiguration": "Contains inference configurations for the prompt variant.", "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) with which to run inference on the prompt.", "Name": "The name of the prompt variant.", "TemplateConfiguration": "Contains configurations for the prompt template.", "TemplateType": "The type of prompt template to use." }, + "AWS::Bedrock::Prompt SpecificToolChoice": { + "Name": "The name of the tool that the model must request." + }, + "AWS::Bedrock::Prompt SystemContentBlock": { + "Text": "A system prompt for the model." + }, "AWS::Bedrock::Prompt TextPromptTemplateConfiguration": { "InputVariables": "An array of the variables in the prompt template.", "Text": "The message for the prompt.", @@ -5835,6 +5918,26 @@ "Key": "The object key for the Amazon S3 location.", "Version": "The version of the Amazon S3 location to use." }, + "AWS::Bedrock::Prompt Tool": { + "ToolSpec": "The specification for the tool." + }, + "AWS::Bedrock::Prompt ToolChoice": { + "Any": "The model must request at least one tool (no text is generated).", + "Auto": "(Default). The model automatically decides if a tool should be called or whether to generate text instead.", + "Tool": "The model must request the specified tool. Only supported by Anthropic Claude 3 models." + }, + "AWS::Bedrock::Prompt ToolConfiguration": { + "ToolChoice": "If supported by the model, forces the model to request a tool.", + "Tools": "An array of tools that you want to pass to a model." + }, + "AWS::Bedrock::Prompt ToolInputSchema": { + "Json": "The JSON schema for the tool. For more information, see [JSON Schema Reference](https://docs.aws.amazon.com/https://json-schema.org/understanding-json-schema/reference) ." + }, + "AWS::Bedrock::Prompt ToolSpecification": { + "Description": "The description for the tool.", + "InputSchema": "The input schema for the tool in JSON format.", + "Name": "The name for the tool." + }, "AWS::Bedrock::PromptVersion": { "Description": "The description of the prompt version.", "PromptArn": "The Amazon Resource Name (ARN) of the version of the prompt.", @@ -6109,6 +6212,7 @@ "TimeUnit": "The granularity of the line items in the report." }, "AWS::Cassandra::Keyspace": { + "ClientSideTimestampsEnabled": "Indicates whether client-side timestamps are enabled (true) or disabled (false) for all tables in the keyspace. To add a Region to a single-Region keyspace with at least one table, the value must be set to true. After you've enabled client-side timestamps for a table, you can\u2019t disable it again.", "KeyspaceName": "The name of the keyspace to be created. The keyspace name is case sensitive. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the keyspace name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n*Length constraints:* Minimum length of 3. Maximum length of 255.\n\n*Pattern:* `^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$`", "ReplicationSpecification": "Specifies the `ReplicationStrategy` of a keyspace. The options are:\n\n- `SINGLE_REGION` for a single Region keyspace (optional) or\n- `MULTI_REGION` for a multi-Region keyspace\n\nIf no `ReplicationStrategy` is provided, the default is `SINGLE_REGION` .
If you choose `MULTI_REGION` , you must also provide a `RegionList` with the AWS Regions that the keyspace is replicated in.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." @@ -6184,6 +6288,15 @@ "ScaleOutCooldown": "Specifies a scale out cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", "TargetValue": "Specifies the target value for the target tracking auto scaling policy.\n\nAmazon Keyspaces auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. This ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define `targetValue` as a percentage. An `integer` between 20 and 90." }, + "AWS::Cassandra::Type": { + "Fields": "A list of fields that define this type.", + "KeyspaceName": "The name of the keyspace to create the type in. The keyspace must already exist.", + "TypeName": "The name of the user-defined type. UDT names must contain 48 characters or less, must begin with an alphabetic character, and can only contain alpha-numeric characters and underscores. Amazon Keyspaces converts upper case characters automatically into lower case characters. For more information, see [Create a user-defined type (UDT) in Amazon Keyspaces](https://docs.aws.amazon.com/keyspaces/latest/devguide/keyspaces-create-udt.html) in the *Amazon Keyspaces Developer Guide* ." + }, + "AWS::Cassandra::Type Field": { + "FieldName": "The name of the field.", + "FieldType": "The data type of the field. This can be any Cassandra data type or another user-defined type." + }, "AWS::CertificateManager::Account": { "ExpiryEventsConfiguration": "Object containing expiration events options associated with an AWS account . For more information, see [ExpiryEventsConfiguration](https://docs.aws.amazon.com/acm/latest/APIReference/API_ExpiryEventsConfiguration.html) in the API reference." }, @@ -6331,7 +6444,7 @@ "AnalysisRules": "The analysis rule that was created for the configured table.", "Description": "A description for the configured table.", "Name": "A name for the configured table.", - "TableReference": "The AWS Glue table that this configured table represents.", + "TableReference": "The table that this configured table represents.", "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." }, "AWS::CleanRooms::ConfiguredTable AggregateColumn": { @@ -6370,6 +6483,12 @@ "JoinColumns": "Columns that can be used to join a configured table with the table of the member who can query and other members' configured tables.", "ListColumns": "Columns that can be listed in the output." }, + "AWS::CleanRooms::ConfiguredTable AthenaTableReference": { + "DatabaseName": "The database name.", + "OutputLocation": "The output location for the Athena table.", + "TableName": "The table reference.", + "WorkGroup": "The workgroup of the Athena table reference." + }, "AWS::CleanRooms::ConfiguredTable ConfiguredTableAnalysisRulePolicy": { "V1": "Controls on the query specifications that can be run on a configured table." 
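The new AWS::Cassandra::Type and Field entries above combine as follows; a minimal sketch with a hypothetical keyspace name and user-defined type:

{
  "Type": "AWS::Cassandra::Type",
  "Properties": {
    "KeyspaceName": "my_keyspace",
    "TypeName": "phone",
    "Fields": [
      { "FieldName": "country_code", "FieldType": "int" },
      { "FieldName": "number", "FieldType": "text" }
    ]
  }
}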
}, @@ -6388,8 +6507,25 @@ "DatabaseName": "The name of the database the AWS Glue table belongs to.", "TableName": "The name of the AWS Glue table." }, + "AWS::CleanRooms::ConfiguredTable SnowflakeTableReference": { + "AccountIdentifier": "The account identifier for the Snowflake table reference.", + "DatabaseName": "The name of the database the Snowflake table belongs to.", + "SchemaName": "The schema name of the Snowflake table reference.", + "SecretArn": "The secret ARN of the Snowflake table reference.", + "TableName": "The name of the Snowflake table.", + "TableSchema": "The schema of the Snowflake table." + }, + "AWS::CleanRooms::ConfiguredTable SnowflakeTableSchema": { + "V1": "The schema of a Snowflake table." + }, + "AWS::CleanRooms::ConfiguredTable SnowflakeTableSchemaV1": { + "ColumnName": "The column name.", + "ColumnType": "The column's data type. Supported data types: `ARRAY` , `BIGINT` , `BOOLEAN` , `CHAR` , `DATE` , `DECIMAL` , `DOUBLE` , `DOUBLE PRECISION` , `FLOAT` , `FLOAT4` , `INT` , `INTEGER` , `MAP` , `NUMERIC` , `NUMBER` , `REAL` , `SMALLINT` , `STRING` , `TIMESTAMP` , `TIMESTAMP_LTZ` , `TIMESTAMP_NTZ` , `DATETIME` , `TINYINT` , `VARCHAR` , `TEXT` , `CHARACTER` ." + }, "AWS::CleanRooms::ConfiguredTable TableReference": { - "Glue": "If present, a reference to the AWS Glue table referred to by this table reference." + "Athena": "If present, a reference to the Athena table referred to by this table reference.", + "Glue": "If present, a reference to the AWS Glue table referred to by this table reference.", + "Snowflake": "If present, a reference to the Snowflake table referred to by this table reference." }, "AWS::CleanRooms::ConfiguredTable Tag": { "Key": "The key of the tag.", @@ -6513,7 +6649,7 @@ "MembershipIdentifier": "The identifier for a membership resource.", "Parameters": "Specifies the epsilon and noise parameters for the privacy budget template.", "PrivacyBudgetType": "Specifies the type of the privacy budget template.", - "Tags": "" + "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." }, "AWS::CleanRooms::PrivacyBudgetTemplate Parameters": { "Epsilon": "The epsilon value that you want to use.", @@ -6625,14 +6761,14 @@ }, "AWS::CloudFormation::HookTypeConfig": { "Configuration": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", - "ConfigurationAlias": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", + "ConfigurationAlias": "An alias by which to refer to this configuration data.\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", "TypeArn": "The Amazon Resource Number (ARN) for the Hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "TypeName": "The unique name for your Hook. Specifies a three-part namespace for your Hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` ." 
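To make the new SnowflakeTableReference entries above concrete, here is a hedged sketch of a TableReference on an AWS::CleanRooms::ConfiguredTable; the identifiers, secret ARN, and columns are placeholders, and the array shape under TableSchema.V1 is an assumption based on the SnowflakeTableSchemaV1 fields:

"TableReference": {
  "Snowflake": {
    "AccountIdentifier": "myorg-myaccount",
    "DatabaseName": "SALES_DB",
    "SchemaName": "PUBLIC",
    "TableName": "ORDERS",
    "SecretArn": "arn:aws:secretsmanager:us-east-1:111122223333:secret:snowflake-credentials",
    "TableSchema": {
      "V1": [
        { "ColumnName": "order_id", "ColumnType": "BIGINT" },
        { "ColumnName": "order_total", "ColumnType": "DECIMAL" }
      ]
    }
  }
}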
}, "AWS::CloudFormation::HookVersion": { "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants the Hook permission.", "LoggingConfig": "Contains logging configuration information for an extension.", - "SchemaHandlerPackage": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That's, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "SchemaHandlerPackage": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package, see [Modeling custom CloudFormation Hooks](https://docs.aws.amazon.com/cloudformation-cli/latest/hooks-userguide/hooks-model.html) in the *AWS CloudFormation Hooks User Guide* .\n\n> To register the Hook, you must have `s3:GetObject` permissions to access the S3 objects.", "TypeName": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\n> The following organization namespaces are reserved and can't be used in your hook type names:\n> \n> - `Alexa`\n> - `AMZN`\n> - `Amazon`\n> - `ASK`\n> - `AWS`\n> - `Custom`\n> - `Dev`" }, "AWS::CloudFormation::HookVersion LoggingConfig": { @@ -6672,9 +6808,9 @@ }, "AWS::CloudFormation::Macro": { "Description": "A description of the macro.", - "FunctionName": "The Amazon Resource Name (ARN) of the underlying AWS Lambda function that you want AWS CloudFormation to invoke when the macro is run.", - "LogGroupName": "The CloudWatch Logs group to which AWS CloudFormation sends error logging information when invoking the macro's underlying AWS Lambda function.", - "LogRoleARN": "The ARN of the role AWS CloudFormation should assume when sending log entries to CloudWatch Logs .", + "FunctionName": "The Amazon Resource Name (ARN) of the underlying Lambda function that you want CloudFormation to invoke when the macro is run.", + "LogGroupName": "The CloudWatch Logs group to which CloudFormation sends error logging information when invoking the macro's underlying Lambda function.", + "LogRoleARN": "The ARN of the role CloudFormation should assume when sending log entries to CloudWatch Logs .", "Name": "The name of the macro. The name of the macro must be unique across all macros in the account." }, "AWS::CloudFormation::ModuleDefaultVersion": { @@ -6684,11 +6820,11 @@ }, "AWS::CloudFormation::ModuleVersion": { "ModuleName": "The name of the module being registered.", - "ModulePackage": "A URL to the S3 bucket containing the package that contains the template fragment and schema files for the module version to register.\n\n> The user registering the module version must be able to access the module package in the S3 bucket. 
That's, the user needs to have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* ." + "ModulePackage": "A URL to the S3 bucket containing the package that contains the template fragment and schema files for the module version to register.\n\nFor more information, see [Module structure and requirements](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/modules-structure.html) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .\n\n> To register the module version, you must have `s3:GetObject` permissions to access the S3 objects." }, "AWS::CloudFormation::PublicTypeVersion": { "Arn": "The Amazon Resource Number (ARN) of the extension.\n\nConditional: You must specify `Arn` , or `TypeName` and `Type` .", - "LogDeliveryBucket": "The S3 bucket to which CloudFormation delivers the contract test execution logs.\n\nCloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of `PASSED` or `FAILED` .\n\nThe user initiating the stack operation must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:\n\n- GetObject\n- PutObject\n\nFor more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "LogDeliveryBucket": "The S3 bucket to which CloudFormation delivers the contract test execution logs.\n\nCloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of `PASSED` or `FAILED` .\n\nThe user initiating the stack operation must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:\n\n- s3:GetObject\n- s3:PutObject", "PublicVersionNumber": "The version number to assign to this version of the extension.\n\nUse the following format, and adhere to semantic versioning when assigning a version number to your extension:\n\n`MAJOR.MINOR.PATCH`\n\nFor more information, see [Semantic Versioning 2.0.0](https://docs.aws.amazon.com/https://semver.org/) .\n\nIf you don't specify a version number, CloudFormation increments the version number by one minor version release.\n\nYou cannot specify a version number the first time you publish a type. CloudFormation automatically sets the first version number to be `1.0.0` .", "Type": "The type of the extension to test.\n\nConditional: You must specify `Arn` , or `TypeName` and `Type` .", "TypeName": "The name of the extension to test.\n\nConditional: You must specify `Arn` , or `TypeName` and `Type` ." @@ -6703,9 +6839,9 @@ "VersionId": "The ID of a specific version of the resource. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the resource version when it's registered.\n\nConditional: You must specify either `TypeVersionArn` , or `TypeName` and `VersionId` ." }, "AWS::CloudFormation::ResourceVersion": { - "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource. 
If your resource calls AWS APIs in any of its handlers, you must create an *[IAM execution role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)* that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.", + "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource. If your resource calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.", "LoggingConfig": "Logging configuration information for a resource.", - "SchemaHandlerPackage": "A URL to the S3 bucket containing the resource project package that contains the necessary files for the resource you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That is, the user needs to have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "SchemaHandlerPackage": "A URL to the S3 bucket containing the resource project package that contains the necessary files for the resource you want to register.\n\nFor information on generating a schema handler package, see [Modeling resource types to use with AWS CloudFormation](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-model.html) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .\n\n> To register the resource version, you must have `s3:GetObject` permissions to access the S3 objects.", "TypeName": "The name of the resource being registered.\n\nWe recommend that resource names adhere to the following pattern: *company_or_organization* :: *service* :: *type* .\n\n> The following organization namespaces are reserved and can't be used in your resource names:\n> \n> - `Alexa`\n> - `AMZN`\n> - `Amazon`\n> - `AWS`\n> - `Custom`\n> - `Dev`" }, "AWS::CloudFormation::ResourceVersion LoggingConfig": { @@ -6713,28 +6849,28 @@ "LogRoleArn": "The ARN of the role that CloudFormation should assume when sending log entries to CloudWatch logs." 
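A minimal sketch of an AWS::CloudFormation::ResourceVersion pulling together the properties above, assuming placeholder type, bucket, and role names:

{
  "Type": "AWS::CloudFormation::ResourceVersion",
  "Properties": {
    "TypeName": "MyCompany::Storage::Widget",
    "SchemaHandlerPackage": "s3://amzn-s3-demo-bucket/widget-handler-package.zip",
    "ExecutionRoleArn": "arn:aws:iam::111122223333:role/widget-execution-role",
    "LoggingConfig": {
      "LogGroupName": "widget-resource-logs",
      "LogRoleArn": "arn:aws:iam::111122223333:role/widget-log-delivery-role"
    }
  }
}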
}, "AWS::CloudFormation::Stack": { - "Capabilities": "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for AWS CloudFormation to create the stack.\n\n- `CAPABILITY_IAM` and `CAPABILITY_NAMED_IAM`\n\nSome stack templates might include resources that can affect permissions in your AWS account ; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.\n\nThe following IAM resources require you to specify either the `CAPABILITY_IAM` or `CAPABILITY_NAMED_IAM` capability.\n\n- If you have IAM resources, you can specify either capability.\n- If you have IAM resources with custom names, you *must* specify `CAPABILITY_NAMED_IAM` .\n- If you don't specify either of these capabilities, AWS CloudFormation returns an `InsufficientCapabilities` error.\n\nIf your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.\n\n- [`AWS::IAM::AccessKey`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html)\n- [`AWS::IAM::Group`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html)\n- [`AWS::IAM::InstanceProfile`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html)\n- [`AWS::IAM::Policy`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html)\n- [`AWS::IAM::Role`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html)\n- [`AWS::IAM::User`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html)\n- [`AWS::IAM::UserToGroupAddition`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html)\n\nFor more information, see [Acknowledging IAM Resources in AWS CloudFormation Templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities) .\n- `CAPABILITY_AUTO_EXPAND`\n\nSome template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. 
This includes the [AWS::Include](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/create-reusable-transform-function-snippets-and-add-to-your-template-with-aws-include-transform.html) and [AWS::Serverless](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html) transforms, which are macros hosted by AWS CloudFormation .\n\nIf you want to create a stack from a stack template that contains macros *and* nested stacks, you must create the stack directly from the template using this capability.\n\n> You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.\n> \n> Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without AWS CloudFormation being notified. \n\nFor more information, see [Using AWS CloudFormation macros to perform custom processing on templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html) .", + "Capabilities": "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.\n\n- `CAPABILITY_IAM` and `CAPABILITY_NAMED_IAM`\n\nSome stack templates might include resources that can affect permissions in your AWS account ; for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.\n\nThe following IAM resources require you to specify either the `CAPABILITY_IAM` or `CAPABILITY_NAMED_IAM` capability.\n\n- If you have IAM resources, you can specify either capability.\n- If you have IAM resources with custom names, you *must* specify `CAPABILITY_NAMED_IAM` .\n- If you don't specify either of these capabilities, CloudFormation returns an `InsufficientCapabilities` error.\n\nIf your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.\n\n- [AWS::IAM::AccessKey](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-accesskey.html)\n- [AWS::IAM::Group](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-group.html)\n- [AWS::IAM::InstanceProfile](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html)\n- [AWS::IAM::Policy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-policy.html)\n- [AWS::IAM::Role](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html)\n- [AWS::IAM::User](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-user.html)\n- [`AWS::IAM::UserToGroupAddition`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-usertogroupaddition.html)\n\nFor more information, see [Acknowledging IAM resources in CloudFormation templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/control-access-with-iam.html#using-iam-capabilities) in the *AWS CloudFormation User Guide* .\n- `CAPABILITY_AUTO_EXPAND`\n\nSome templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates.
Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the [AWS::Include](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-include.html) and [AWS::Serverless](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html) transforms, which are macros hosted by CloudFormation .\n\nIf you want to create a stack from a stack template that contains macros *and* nested stacks, you must create the stack directly from the template using this capability.\n\n> You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.\n> \n> Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. \n\nFor more information, see [Perform custom processing on CloudFormation templates with template macros](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html) in the *AWS CloudFormation User Guide* .", "ChangeSetId": "The unique ID of the change set.", "CreationTime": "The time at which the stack was created.", "Description": "A user-defined description associated with the stack.", "DisableRollback": "Set to `true` to disable rollback of the stack if stack creation failed. You can specify either `DisableRollback` or `OnFailure` , but not both.\n\nDefault: `false`", - "EnableTerminationProtection": "Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see [Protecting a Stack From Being Deleted](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html) in the *AWS CloudFormation User Guide* . Termination protection is deactivated on stacks by default.\n\nFor [nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) , termination protection is set on the root stack and can't be changed directly on the nested stack.", + "EnableTerminationProtection": "Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see [Protect CloudFormation stacks from being deleted](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html) in the *AWS CloudFormation User Guide* . Termination protection is deactivated on stacks by default.\n\nFor nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.", "LastUpdateTime": "The time the stack was last updated. This field will only be returned if the stack has been updated at least once.", "NotificationARNs": "The Amazon SNS topic ARNs to publish stack related events. 
You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).", "Outputs": "A list of output structures.", "Parameters": "The set value pairs that represent the parameters passed to CloudFormation when this nested stack is created. Each parameter has a name corresponding to a parameter defined in the embedded template and a value representing the value that you want to set for the parameter.\n\n> If you use the `Ref` function to pass a parameter value to a nested stack, comma-delimited list parameters must be of type `String` . In other words, you can't pass values that are of type `CommaDelimitedList` to nested stacks. \n\nConditional. Required if the nested stack requires input parameters.\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", "ParentId": "For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.\n\nFor more information, see [Embed stacks within other stacks using nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) in the *AWS CloudFormation User Guide* .", - "RoleARN": "The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to create the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.\n\nIf you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that's generated from your user credentials.", + "RoleARN": "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.\n\nIf you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials.", "RootId": "For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.\n\nFor more information, see [Embed stacks within other stacks using nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) in the *AWS CloudFormation User Guide* .", "StackId": "Unique identifier of the stack.", "StackName": "The name that's associated with the stack. The name must be unique in the Region in which you are creating the stack.\n\n> A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetical character and can't be longer than 128 characters.", - "StackPolicyBody": "Structure containing the stack policy body. 
For more information, go to [Prevent Updates to Stack Resources](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) in the *AWS CloudFormation User Guide* . You can specify either the `StackPolicyBody` or the `StackPolicyURL` parameter, but not both.", + "StackPolicyBody": "Structure containing the stack policy body. For more information, go to [Prevent updates to stack resources](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) in the *AWS CloudFormation User Guide* . You can specify either the `StackPolicyBody` or the `StackPolicyURL` parameter, but not both.", "StackPolicyURL": "Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. You can specify either the `StackPolicyBody` or the `StackPolicyURL` parameter, but not both.", "StackStatus": "Current status of the stack.", "StackStatusReason": "Success/failure message associated with the stack status.", "Tags": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.", - "TemplateBody": "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to [Template anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) in the AWS CloudFormation User Guide.\n\nConditional: You must specify either the `TemplateBody` or the `TemplateURL` parameter, but not both.", - "TemplateURL": "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see [Template anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) .\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", + "TemplateBody": "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.\n\nConditional: You must specify either the `TemplateBody` or the `TemplateURL` parameter, but not both.", + "TemplateURL": "The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket. The location for an Amazon S3 bucket must start with `https://` .\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", "TimeoutInMinutes": "The length of time, in minutes, that CloudFormation waits for the nested stack to reach the `CREATE_COMPLETE` state. The default is no timeout. When CloudFormation detects that the nested stack has reached the `CREATE_COMPLETE` state, it marks the nested stack resource as `CREATE_COMPLETE` in the parent stack and resumes creating the parent stack. If the timeout period expires before the nested stack reaches `CREATE_COMPLETE` , CloudFormation marks the nested stack as failed and rolls back both the nested stack and parent stack.\n\nUpdates aren't supported." }, "AWS::CloudFormation::Stack Output": { @@ -6748,21 +6884,21 @@ "Value": "*Required* . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value." 
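To ground the `AWS::CloudFormation::Stack` entries above, here is a minimal sketch of a nested stack resource in CloudFormation YAML; the bucket URL, parameter name, and tag values are hypothetical placeholders, not values taken from this schema:

```yaml
# Minimal nested-stack sketch (hypothetical names and URL).
Resources:
  ExampleNestedStack:
    Type: AWS::CloudFormation::Stack
    Properties:
      # Must be an https:// Amazon S3 location pointing to a template of at most 1 MB.
      TemplateURL: https://example-bucket.s3.amazonaws.com/child-template.yaml
      # Required only if the embedded template declares parameters; values of
      # type CommaDelimitedList must be passed to nested stacks as String.
      Parameters:
        ChildParameter: example-value
      # If CREATE_COMPLETE isn't reached within 30 minutes, CloudFormation
      # rolls back both the nested stack and the parent stack.
      TimeoutInMinutes: 30
      # Tags are propagated to the resources created in the stack (max 50).
      Tags:
        - Key: environment
          Value: test
```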
}, "AWS::CloudFormation::StackSet": { - "AdministrationRoleARN": "The Amazon Resource Number (ARN) of the IAM role to use to create this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account.\n\nUse customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see [Prerequisites: Granting Permissions for Stack Set Operations](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs.html) in the *AWS CloudFormation User Guide* .\n\n*Minimum* : `20`\n\n*Maximum* : `2048`", + "AdministrationRoleARN": "The Amazon Resource Number (ARN) of the IAM role to use to create this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account.\n\nFor more information, see [Grant self-managed permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) in the *AWS CloudFormation User Guide* .\n\n*Minimum* : `20`\n\n*Maximum* : `2048`", "AutoDeployment": "[ `Service-managed` permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).", "CallAs": "[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.\n\nBy default, `SELF` is specified. Use `SELF` for stack sets with self-managed permissions.\n\n- To create a stack set with service-managed permissions while signed in to the management account, specify `SELF` .\n- To create a stack set with service-managed permissions while signed in to a delegated administrator account, specify `DELEGATED_ADMIN` .\n\nYour AWS account must be registered as a delegated admin in the management account. For more information, see [Register a delegated administrator](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-orgs-delegated-admin.html) in the *AWS CloudFormation User Guide* .\n\nStack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated administrators.\n\n*Valid Values* : `SELF` | `DELEGATED_ADMIN`", - "Capabilities": "The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account \u2014for example, by creating new AWS Identity and Access Management ( IAM ) users. For more information, see [Acknowledging IAM Resources in AWS CloudFormation Templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities) .", + "Capabilities": "The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account \u2014for example, by creating new IAM users. 
For more information, see [Acknowledging IAM resources in CloudFormation templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/control-access-with-iam.html#using-iam-capabilities) in the *AWS CloudFormation User Guide* .", "Description": "A description of the stack set.\n\n*Minimum* : `1`\n\n*Maximum* : `1024`", - "ExecutionRoleName": "The name of the IAM execution role to use to create the stack set. If you don't specify an execution role, AWS CloudFormation uses the `AWSCloudFormationStackSetExecutionRole` role for the stack set operation.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `[a-zA-Z_0-9+=,.@-]+`", + "ExecutionRoleName": "The name of the IAM execution role to use to create the stack set. If you don't specify an execution role, CloudFormation uses the `AWSCloudFormationStackSetExecutionRole` role for the stack set operation.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `[a-zA-Z_0-9+=,.@-]+`", "ManagedExecution": "Describes whether StackSets performs non-conflicting operations concurrently and queues conflicting operations.\n\nWhen active, StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order.\n\n> If there are already running or queued operations, StackSets queues all incoming operations even if they are non-conflicting.\n> \n> You can't modify your stack set's execution configuration while there are running or queued operations for that stack set. \n\nWhen inactive (default), StackSets performs one operation at a time in request order.", - "OperationPreferences": "The user-specified preferences for how AWS CloudFormation performs a stack set operation.", + "OperationPreferences": "The user-specified preferences for how CloudFormation performs a stack set operation.", "Parameters": "The input parameters for the stack set template.", - "PermissionModel": "Describes how the IAM roles required for stack set operations are created.\n\n- With `SELF_MANAGED` permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see [Grant Self-Managed Stack Set Permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) .\n- With `SERVICE_MANAGED` permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations .", + "PermissionModel": "Describes how the IAM roles required for stack set operations are created.\n\n- With `SELF_MANAGED` permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see [Grant self-managed permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) in the *AWS CloudFormation User Guide* .\n- With `SERVICE_MANAGED` permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations . For more information, see [Activate trusted access for stack sets with AWS Organizations](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-orgs-activate-trusted-access.html) in the *AWS CloudFormation User Guide* .", "StackInstancesGroup": "A group of stack instances with parameters in some specific accounts and Regions.", "StackSetName": "The name to associate with the stack set. 
The name must be unique in the Region where you create your stack set.\n\n> The `StackSetName` property is required.", "Tags": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n\nIf you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value, CloudFormation removes all associated tags.", "TemplateBody": "The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes.\n\nYou must include either `TemplateURL` or `TemplateBody` in a StackSet, but you can't use both. Dynamic references in the `TemplateBody` may not work correctly in all cases. It's recommended to pass templates containing dynamic references through `TemplateURL` instead.", - "TemplateURL": "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to [Template Anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) in the AWS CloudFormation User Guide.\n\nConditional: You must specify only one of the following parameters: `TemplateBody` , `TemplateURL` ." + "TemplateURL": "The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with `https://` .\n\nConditional: You must specify only one of the following parameters: `TemplateBody` , `TemplateURL` ." }, "AWS::CloudFormation::StackSet AutoDeployment": { "Enabled": "If set to `true` , StackSets automatically deploys additional stack instances to AWS Organizations accounts that are added to a target organization or organizational unit (OU) in the specified Regions. If an account is removed from a target organization or OU, StackSets deletes stack instances from the account in the specified Regions.", @@ -6770,24 +6906,24 @@ }, "AWS::CloudFormation::StackSet DeploymentTargets": { "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. `UNION` is not supported for create operations when using StackSet as a resource.", - "Accounts": "The names of one or more AWS accounts for which you want to deploy stack set updates.\n\n*Pattern* : `^[0-9]{12}$`", - "AccountsUrl": "Returns the value of the `AccountsUrl` property.", - "OrganizationalUnitIds": "The organization root ID or organizational unit (OU) IDs to which StackSets deploys.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`" + "Accounts": "The account IDs of the AWS accounts . If you have many account numbers, you can provide those accounts using the `AccountsUrl` property instead.\n\n*Pattern* : `^[0-9]{12}$`", + "AccountsUrl": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. 
There is currently a 10 MB limit for the data (approximately 800,000 accounts).", + "OrganizationalUnitIds": "The organization root ID or organizational unit (OU) IDs.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`" }, "AWS::CloudFormation::StackSet ManagedExecution": { "Active": "When `true` , StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order.\n\n> If there are already running or queued operations, StackSets queues all incoming operations even if they are non-conflicting.\n> \n> You can't modify your stack set's execution configuration while there are running or queued operations for that stack set. \n\nWhen `false` (default), StackSets performs one operation at a time in request order." }, "AWS::CloudFormation::StackSet OperationPreferences": { "ConcurrencyMode": "Specifies how the concurrency level behaves during the operation execution.\n\n- `STRICT_FAILURE_TOLERANCE` : This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of `FailureToleranceCount` +1. The initial actual concurrency is set to the lower of either the value of the `MaxConcurrentCount` , or the value of `FailureToleranceCount` +1. The actual concurrency is then reduced proportionally by the number of failures. This is the default behavior.\n\nIf failure tolerance or maximum concurrent accounts are set to percentages, the behavior is similar.\n- `SOFT_FAILURE_TOLERANCE` : This option decouples `FailureToleranceCount` from the actual concurrency. This allows stack set operations to run at the concurrency level set by the `MaxConcurrentCount` value, or `MaxConcurrentPercentage` , regardless of the number of failures.", - "FailureToleranceCount": "The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` (but not both).", - "FailureTolerancePercentage": "The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nWhen calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds *down* to the next whole number.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` , but not both.", + "FailureToleranceCount": "The number of accounts, per Region, for which this operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` (but not both).", + "FailureTolerancePercentage": "The percentage of accounts, per Region, for which this stack operation can fail before CloudFormation stops the operation in that Region. 
If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nWhen calculating the number of accounts based on the specified percentage, CloudFormation rounds *down* to the next whole number.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` , but not both.", "MaxConcurrentCount": "The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of `FailureToleranceCount` . `MaxConcurrentCount` is at most one more than the `FailureToleranceCount` .\n\nNote that this setting lets you specify the *maximum* for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n\nConditional: You must specify either `MaxConcurrentCount` or `MaxConcurrentPercentage` , but not both.", - "MaxConcurrentPercentage": "The maximum percentage of accounts in which to perform this operation at one time.\n\nWhen calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.\n\nNote that this setting lets you specify the *maximum* for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n\nConditional: You must specify either `MaxConcurrentCount` or `MaxConcurrentPercentage` , but not both.", + "MaxConcurrentPercentage": "The maximum percentage of accounts in which to perform this operation at one time.\n\nWhen calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.\n\nNote that this setting lets you specify the *maximum* for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n\nConditional: You must specify either `MaxConcurrentCount` or `MaxConcurrentPercentage` , but not both.", "RegionConcurrencyType": "The concurrency type of deploying StackSets operations in Regions: in parallel or one Region at a time.", "RegionOrder": "The order of the Regions where you want to perform the stack operation.\n\n> `RegionOrder` isn't followed if `AutoDeployment` is enabled." }, "AWS::CloudFormation::StackSet Parameter": { - "ParameterKey": "The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that's specified in your template.", + "ParameterKey": "The key associated with the parameter. If you don't specify a key and value for a particular parameter, CloudFormation uses the default value that's specified in your template.", "ParameterValue": "The input value associated with the parameter." }, "AWS::CloudFormation::StackSet StackInstances": { @@ -6817,10 +6953,31 @@ }, "AWS::CloudFormation::WaitCondition": { "Count": "The number of success signals that CloudFormation must receive before it continues the stack creation process. When the wait condition receives the requisite number of success signals, CloudFormation resumes the creation of the stack. 
If the wait condition doesn't receive the specified number of success signals before the Timeout period expires, CloudFormation assumes that the wait condition has failed and rolls the stack back.\n\nUpdates aren't supported.", - "Handle": "A reference to the wait condition handle used to signal this wait condition. Use the `Ref` intrinsic function to specify an [`AWS::CloudFormation::WaitConditionHandle`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitconditionhandle.html) resource.\n\nAnytime you add a `WaitCondition` resource during a stack update, you must associate the wait condition with a new WaitConditionHandle resource. Don't reuse an old wait condition handle that has already been defined in the template. If you reuse a wait condition handle, the wait condition might evaluate old signals from a previous create or update stack command.\n\nUpdates aren't supported.", + "Handle": "A reference to the wait condition handle used to signal this wait condition. Use the `Ref` intrinsic function to specify an [AWS::CloudFormation::WaitConditionHandle](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-waitconditionhandle.html) resource.\n\nAnytime you add a `WaitCondition` resource during a stack update, you must associate the wait condition with a new WaitConditionHandle resource. Don't reuse an old wait condition handle that has already been defined in the template. If you reuse a wait condition handle, the wait condition might evaluate old signals from a previous create or update stack command.\n\nUpdates aren't supported.", "Timeout": "The length of time (in seconds) to wait for the number of signals that the `Count` property specifies. `Timeout` is a minimum-bound property, meaning the timeout occurs no sooner than the time you specify, but can occur shortly thereafter. The maximum time that can be specified for this property is 12 hours (43200 seconds).\n\nUpdates aren't supported." }, "AWS::CloudFormation::WaitConditionHandle": {}, + "AWS::CloudFront::AnycastIpList": { + "IpCount": "The number of IP addresses in the Anycast static IP list.", + "Name": "The name of the Anycast static IP list.", + "Tags": "A complex type that contains zero or more `Tag` elements." + }, + "AWS::CloudFront::AnycastIpList AnycastIpList": { + "AnycastIps": "The static IP addresses that are allocated to the Anycast static IP list.", + "Arn": "The Amazon Resource Name (ARN) of the Anycast static IP list.", + "Id": "The ID of the Anycast static IP list.", + "IpCount": "The number of IP addresses in the Anycast static IP list.", + "LastModifiedTime": "The last time the Anycast static IP list was modified.", + "Name": "The name of the Anycast static IP list.", + "Status": "The status of the Anycast static IP list. Valid values: `Deployed` , `Deploying` , or `Failed` ." + }, + "AWS::CloudFront::AnycastIpList Tag": { + "Key": "A string that contains the `Tag` key.\n\nThe string length should be between 1 and 128 characters. Valid characters include `a-z` , `A-Z` , `0-9` , space, and the special characters `_ - . : / = + @` .", + "Value": "A string that contains an optional `Tag` value.\n\nThe string length should be between 0 and 256 characters. Valid characters include `a-z` , `A-Z` , `0-9` , space, and the special characters `_ - . : / = + @` ." + }, + "AWS::CloudFront::AnycastIpList Tags": { + "Items": "A complex type that contains `Tag` elements." 
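As a sketch of how the `AWS::CloudFormation::WaitCondition` properties documented above fit together, here is a hypothetical pairing of a fresh handle with a wait condition (resource names are placeholders):

```yaml
# Hypothetical resource names; a WaitCondition always signals through a handle.
Resources:
  ExampleWaitHandle:
    Type: AWS::CloudFormation::WaitConditionHandle
  ExampleWaitCondition:
    Type: AWS::CloudFormation::WaitCondition
    Properties:
      # Handle must Ref a WaitConditionHandle; use a new handle for each
      # update so old signals aren't evaluated again.
      Handle: !Ref ExampleWaitHandle
      # Seconds to wait for signals; 43200 (12 hours) is the maximum.
      Timeout: "3600"
      # Success signals required before stack creation resumes.
      Count: 1
```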
+ }, "AWS::CloudFront::CachePolicy": { "CachePolicyConfig": "The cache policy configuration." }, @@ -6906,6 +7063,7 @@ "FieldLevelEncryptionId": "The value of `ID` for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for this cache behavior.", "ForwardedValues": "This field is deprecated. We recommend that you use a cache policy or an origin request policy instead of this field. For more information, see [Working with policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/working-with-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you want to include values in the cache key, use a cache policy. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you want to send values to the origin but not include them in the cache key, use an origin request policy. For more information, see [Creating origin request policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) or [Using the managed origin request policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nA `CacheBehavior` must include either a `CachePolicyId` or `ForwardedValues` . We recommend that you use a `CachePolicyId` .\n\nA complex type that specifies how CloudFront handles query strings, cookies, and HTTP headers.", "FunctionAssociations": "A list of CloudFront functions that are associated with this cache behavior. CloudFront functions must be published to the `LIVE` stage to associate them with a cache behavior.", + "GrpcConfig": "The gRPC configuration for your cache behavior.", "LambdaFunctionAssociations": "A complex type that contains zero or more Lambda@Edge function associations for a cache behavior.", "MaxTTL": "This field is deprecated. We recommend that you use the `MaxTTL` field in a cache policy instead of this field. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nThe maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as `Cache-Control max-age` , `Cache-Control s-maxage` , and `Expires` to objects. For more information, see [Managing How Long Content Stays in an Edge Cache (Expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) in the *Amazon CloudFront Developer Guide* .", "MinTTL": "This field is deprecated. We recommend that you use the `MinTTL` field in a cache policy instead of this field. 
For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nThe minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see [Managing How Long Content Stays in an Edge Cache (Expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) in the *Amazon CloudFront Developer Guide* .\n\nYou must specify `0` for `MinTTL` if you configure CloudFront to forward all headers to your origin (under `Headers` , if you specify `1` for `Quantity` and `*` for `Name` ).", @@ -6932,9 +7090,9 @@ "AWS::CloudFront::Distribution CustomOriginConfig": { "HTTPPort": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", "HTTPSPort": "The HTTPS port that CloudFront uses to connect to the origin. Specify the HTTPS port that the origin listens on.", - "OriginKeepaliveTimeout": "Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.\n\nFor more information, see [Origin Keep-alive Timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginKeepaliveTimeout) in the *Amazon CloudFront Developer Guide* .", + "OriginKeepaliveTimeout": "Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.\n\nFor more information, see [Keep-alive timeout (custom origins only)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginKeepaliveTimeout) in the *Amazon CloudFront Developer Guide* .", "OriginProtocolPolicy": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin. Valid values are:\n\n- `http-only` \u2013 CloudFront always uses HTTP to connect to the origin.\n- `match-viewer` \u2013 CloudFront connects to the origin using the same protocol that the viewer used to connect to CloudFront.\n- `https-only` \u2013 CloudFront always uses HTTPS to connect to the origin.", - "OriginReadTimeout": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the *origin response timeout* . The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.\n\nFor more information, see [Origin Response Timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout) in the *Amazon CloudFront Developer Guide* .", + "OriginReadTimeout": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the *origin response timeout* . 
The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.\n\nFor more information, see [Response timeout (custom origins only)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout) in the *Amazon CloudFront Developer Guide* .", "OriginSSLProtocols": "Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting to your origin over HTTPS. Valid values include `SSLv3` , `TLSv1` , `TLSv1.1` , and `TLSv1.2` .\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* ." }, "AWS::CloudFront::Distribution DefaultCacheBehavior": { @@ -6946,6 +7104,7 @@ "FieldLevelEncryptionId": "The value of `ID` for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for the default cache behavior.", "ForwardedValues": "This field is deprecated. We recommend that you use a cache policy or an origin request policy instead of this field. For more information, see [Working with policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/working-with-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you want to include values in the cache key, use a cache policy. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you want to send values to the origin but not include them in the cache key, use an origin request policy. For more information, see [Creating origin request policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) or [Using the managed origin request policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nA `DefaultCacheBehavior` must include either a `CachePolicyId` or `ForwardedValues` . We recommend that you use a `CachePolicyId` .\n\nA complex type that specifies how CloudFront handles query strings, cookies, and HTTP headers.", "FunctionAssociations": "A list of CloudFront functions that are associated with this cache behavior. Your functions must be published to the `LIVE` stage to associate them with a cache behavior.", + "GrpcConfig": "The gRPC configuration for your cache behavior.", "LambdaFunctionAssociations": "A complex type that contains zero or more Lambda@Edge function associations for a cache behavior.", "MaxTTL": "This field is deprecated. We recommend that you use the `MaxTTL` field in a cache policy instead of this field. 
For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nThe maximum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. The value that you specify applies only when your origin adds HTTP headers such as `Cache-Control max-age` , `Cache-Control s-maxage` , and `Expires` to objects. For more information, see [Managing How Long Content Stays in an Edge Cache (Expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) in the *Amazon CloudFront Developer Guide* .", "MinTTL": "This field is deprecated. We recommend that you use the `MinTTL` field in a cache policy instead of this field. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) or [Using the managed cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) in the *Amazon CloudFront Developer Guide* .\n\nThe minimum amount of time that you want objects to stay in CloudFront caches before CloudFront forwards another request to your origin to determine whether the object has been updated. For more information, see [Managing How Long Content Stays in an Edge Cache (Expiration)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) in the *Amazon CloudFront Developer Guide* .\n\nYou must specify `0` for `MinTTL` if you configure CloudFront to forward all headers to your origin (under `Headers` , if you specify `1` for `Quantity` and `*` for `Name` ).", @@ -6960,6 +7119,7 @@ }, "AWS::CloudFront::Distribution DistributionConfig": { "Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", + "AnycastIpListId": "ID of the Anycast static IP list that is associated with the distribution.", "CNAMEs": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "CacheBehaviors": "A complex type that contains zero or more `CacheBehavior` elements.", "Comment": "A comment to describe the distribution. The comment cannot be longer than 128 characters.", @@ -6967,7 +7127,7 @@ "CustomErrorResponses": "A complex type that controls the following:\n\n- Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n- How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n\nFor more information about custom error pages, see [Customizing Error Responses](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) in the *Amazon CloudFront Developer Guide* .", "CustomOrigin": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. 
We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "DefaultCacheBehavior": "A complex type that describes the default cache behavior if you don't specify a `CacheBehavior` element or if files don't match any of the values of `PathPattern` in `CacheBehavior` elements. You must create exactly one default cache behavior.", - "DefaultRootObject": "The object that you want CloudFront to request from your origin (for example, `index.html` ) when a viewer requests the root URL for your distribution ( `https://www.example.com` ) instead of an object in your distribution ( `https://www.example.com/product-description.html` ). Specifying a default root object avoids exposing the contents of your distribution.\n\nSpecify only the object name, for example, `index.html` . Don't add a `/` before the object name.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Creating a Default Root Object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", + "DefaultRootObject": "When a viewer requests the root URL for your distribution, the default root object is the object that you want CloudFront to request from your origin. For example, if your root URL is `https://www.example.com` , you can specify CloudFront to return the `index.html` file as the default root object. You can specify a default root object so that viewers see a specific file or object, instead of another object in your distribution (for example, `https://www.example.com/product-description.html` ). A default root object avoids exposing the contents of your distribution.\n\nYou can specify the object name or a path to the object name (for example, `index.html` or `exampleFolderName/index.html` ). Your string can't begin with a forward slash ( `/` ). Only specify the object name or the path to the object.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Specify a default root object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", "Enabled": "From this field, you can enable or disable the selected distribution.", "HttpVersion": "(Optional) Specify the HTTP version(s) that you want viewers to use to communicate with CloudFront . The default value for new distributions is `http1.1` .\n\nFor viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must support Server Name Indication (SNI).\n\nFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name Indication (SNI). 
CloudFront supports HTTP/3 connection migration to allow the viewer to switch networks without losing connection. For more information about connection migration, see [Connection Migration](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-migration) at RFC 9000. For more information about supported TLSv1.3 ciphers, see [Supported protocols and ciphers between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html) .", "IPV6Enabled": "If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your distribution, specify `true` . If you specify `false` , CloudFront responds to IPv6 DNS requests with the DNS response code `NOERROR` and with no IP addresses. This allows viewers to submit a second request, for an IPv4 address for your distribution.\n\nIn general, you should enable IPv6 if you have users on IPv6 networks who want to access your content. However, if you're using signed URLs or signed cookies to restrict access to your content, and if you're using a custom policy that includes the `IpAddress` parameter to restrict the IP addresses that can access your content, don't enable IPv6. If you want to restrict access to some content by IP address and not restrict access to other content (or restrict access but not by IP address), you can create two distributions. For more information, see [Creating a Signed URL Using a Custom Policy](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you're using an Amazon Route\u00a053 AWS Integration alias resource record set to route traffic to your CloudFront distribution, you need to create a second alias resource record set when both of the following are true:\n\n- You enable IPv6 for the distribution\n- You're using alternate domain names in the URLs for your objects\n\nFor more information, see [Routing Traffic to an Amazon CloudFront Web Distribution by Using Your Domain Name](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) in the *Amazon Route\u00a053 AWS Integration Developer Guide* .\n\nIf you created a CNAME resource record set, either with Amazon Route\u00a053 AWS Integration or with another DNS service, you don't need to make any changes. A CNAME record will route traffic to your distribution regardless of the IP address format of the viewer request.", @@ -6995,6 +7155,9 @@ "Locations": "A complex type that contains a `Location` element for each country in which you want CloudFront either to distribute your content ( `whitelist` ) or not distribute your content ( `blacklist` ).\n\nThe `Location` element is a two-letter, uppercase country code for a country that you want to include in your `blacklist` or `whitelist` . Include one `Location` element for each country.\n\nCloudFront and `MaxMind` both use `ISO 3166` country codes. For the current list of countries and the corresponding codes, see `ISO 3166-1-alpha-2` code on the *International Organization for Standardization* website. 
You can also refer to the country list on the CloudFront console, which includes both country names and codes.", "RestrictionType": "The method that you want to use to restrict distribution of your content by country:\n\n- `none` : No geo restriction is enabled, meaning access to content is not restricted by client geo location.\n- `blacklist` : The `Location` elements specify the countries in which you don't want CloudFront to distribute your content.\n- `whitelist` : The `Location` elements specify the countries in which you want CloudFront to distribute your content." }, + "AWS::CloudFront::Distribution GrpcConfig": { + "Enabled": "Enables your CloudFront distribution to receive gRPC requests and to proxy them directly to your origins." + }, "AWS::CloudFront::Distribution LambdaFunctionAssociation": { "EventType": "Specifies the event type that triggers a Lambda@Edge function invocation. You can specify the following values:\n\n- `viewer-request` : The function executes when CloudFront receives a request from a viewer and before it checks to see whether the requested object is in the edge cache.\n- `origin-request` : The function executes only when CloudFront sends a request to your origin. When the requested object is in the edge cache, the function doesn't execute.\n- `origin-response` : The function executes after CloudFront receives a response from the origin and before it caches the object in the response. When the requested object is in the edge cache, the function doesn't execute.\n- `viewer-response` : The function executes before CloudFront returns the requested object to the viewer. The function executes regardless of whether the object was already in the edge cache.\n\nIf the origin returns an HTTP status code other than HTTP 200 (OK), the function doesn't execute.", "IncludeBody": "A flag that allows a Lambda@Edge function to have read access to the body content. For more information, see [Accessing the Request Body by Choosing the Include Body Option](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-include-body-access.html) in the Amazon CloudFront Developer Guide.", @@ -7035,7 +7198,8 @@ "AWS::CloudFront::Distribution OriginGroup": { "FailoverCriteria": "A complex type that contains information about the failover criteria for an origin group.", "Id": "The origin group's ID.", - "Members": "A complex type that contains information about the origins in an origin group." + "Members": "A complex type that contains information about the origins in an origin group.", + "SelectionCriteria": "The selection criteria for the origin group. For more information, see [Create an origin group](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/high_availability_origin_failover.html#concept_origin_groups.creating) in the *Amazon CloudFront Developer Guide* ." }, "AWS::CloudFront::Distribution OriginGroupFailoverCriteria": { "StatusCodes": "The status codes that, when returned from the primary origin, will trigger CloudFront to failover to the second origin." @@ -7354,7 +7518,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . 
This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. 
This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n> Selectors don't support the use of wildcards like `*` . To match multiple values with a single condition, you may use `StartsWith` , `EndsWith` , `NotStartsWith` , or `NotEndsWith` to explicitly match the beginning or end of the event field. \n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. 
This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -7396,7 +7560,7 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. 
If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. 
To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n> Selectors don't support the use of wildcards like `*` . To match multiple values with a single condition, you may use `StartsWith` , `EndsWith` , `NotStartsWith` , or `NotEndsWith` to explicitly match the beginning or end of the event field. \n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -8153,7 +8317,7 @@ "TimeoutInMinutes": "A timeout duration in minutes that can be applied against the ActionType\u2019s default timeout value specified in [Quotas for AWS CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/limits.html) . This attribute is available only to the manual approval ActionType." }, "AWS::CodePipeline::Pipeline ActionTypeId": { - "Category": "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.\n\n- `Source`\n- `Build`\n- `Test`\n- `Deploy`\n- `Invoke`\n- `Approval`", + "Category": "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.\n\n- `Source`\n- `Build`\n- `Test`\n- `Deploy`\n- `Invoke`\n- `Approval`\n- `Compute`", "Owner": "The creator of the action being called. There are three valid values for the `Owner` field in the action category section within your pipeline structure: `AWS` , `ThirdParty` , and `Custom` . For more information, see [Valid Action Types and Providers in CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers) .", "Provider": "The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of CodeDeploy, which would be specified as `CodeDeploy` . 
For more information, see [Valid Action Types and Providers in CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers) .", "Version": "A string that describes the action version." @@ -8419,9 +8583,9 @@ }, "AWS::Cognito::LogDeliveryConfiguration LogConfiguration": { "CloudWatchLogsConfiguration": "Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.\n\nThis data type is a request parameter of [SetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SetLogDeliveryConfiguration.html) and a response parameter of [GetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetLogDeliveryConfiguration.html) .", - "EventSource": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", + "EventSource": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about threat-protection user activity in user pools with the Plus feature plan, set to `userAuthEvents` .", "FirehoseConfiguration": "Configuration for the Amazon Data Firehose stream destination of user activity log export with advanced security features.", - "LogLevel": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", + "LogLevel": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/exporting-quotas-and-usage.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from threat protection with the Plus feature plan, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", "S3Configuration": "Configuration for the Amazon S3 bucket destination of user activity log export with advanced security features." }, "AWS::Cognito::LogDeliveryConfiguration S3Configuration": { @@ -8429,10 +8593,10 @@ }, "AWS::Cognito::ManagedLoginBranding": { "Assets": "An array of image files that you want to apply to roles like backgrounds, logos, and icons. Each object must also indicate whether it is for dark mode, light mode, or browser-adaptive mode.", - "ClientId": "", - "ReturnMergedResources": "", + "ClientId": "The app client that's assigned to the branding style that you want more information about.", + "ReturnMergedResources": "When `true` , returns values for branding options that are unchanged from Amazon Cognito defaults. 
When `false` or when you omit this parameter, returns only values that you customized in your branding style.",
    "Settings": "A JSON file, encoded as a `Document` type, with the settings that you want to apply to your style.",
-    "UseCognitoProvidedValues": "When true, applies the default branding style options. This option reverts to a \"blank\" style that you can modify later in the branding designer.",
+    "UseCognitoProvidedValues": "When true, applies the default branding style options. This option reverts to default style options that are managed by Amazon Cognito. You can modify them later in the branding designer.\n\nWhen you specify `true` for this option, you must also omit values for `Settings` and `Assets` in the request.",
    "UserPoolId": "The user pool where the branding style is assigned."
  },
  "AWS::Cognito::ManagedLoginBranding AssetType": {
@@ -8445,10 +8609,10 @@
  "AWS::Cognito::UserPool": {
    "AccountRecoverySetting": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email.",
    "AdminCreateUserConfig": "The settings for administrator creation of users in a user pool. Contains settings for allowing user sign-up, customizing invitation messages to new users, and the amount of time before temporary passwords expire.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .",
-    "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .",
-    "AutoVerifiedAttributes": "The attributes to be auto-verified. Possible values: *email* , *phone_number* .",
+    "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* . For more information about alias attributes, see [Customizing sign-in attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases) .",
+    "AutoVerifiedAttributes": "The attributes that you want your user pool to automatically verify. Possible values: *email* , *phone_number* . For more information, see [Verifying contact information at sign-up](https://docs.aws.amazon.com/cognito/latest/developerguide/signing-up-users-in-your-app.html#allowing-users-to-sign-up-and-confirm-themselves) .",
    "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. 
To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.",
-    "DeviceConfiguration": "The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.",
+    "DeviceConfiguration": "The device-remembering configuration for a user pool. Device remembering or device tracking is a \"Remember me on this device\" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see [Working with user devices in your user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-device-tracking.html) . A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.",
    "EmailAuthenticationMessage": "",
    "EmailAuthenticationSubject": "",
    "EmailConfiguration": "The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool.",
@@ -8458,20 +8622,20 @@
    "LambdaConfig": "A collection of user pool Lambda triggers. Amazon Cognito invokes triggers at several possible stages of authentication operations. Triggers can modify the outcome of the operations that invoked them.",
    "MfaConfiguration": "The multi-factor authentication (MFA) configuration. Valid values include:\n\n- `OFF` MFA won't be used for any users.\n- `ON` MFA is required for all users to sign in.\n- `OPTIONAL` MFA will be required only for individual users who have an MFA factor activated.",
    "Policies": "A list of user pool policies. Contains the policy that sets password-complexity requirements.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .",
-    "Schema": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.",
+    "Schema": "An array of attributes for the new user pool. You can add custom attributes and modify the properties of default attributes. The specifications in this parameter set the required attributes in your user pool. For more information, see [Working with user attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html) .",
    "SmsAuthenticationMessage": "The contents of the SMS authentication message.",
-    "SmsConfiguration": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. 
To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account .",
+    "SmsConfiguration": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account . For more information, see [SMS message settings](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-sms-settings.html) .",
    "SmsVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) .",
    "UserAttributeUpdateSettings": "The settings for updates to user attributes. These settings include the property `AttributesRequireVerificationBeforeUpdate` ,\na user-pool setting that tells Amazon Cognito how to handle changes to the value of your users' email address and phone number attributes. For\nmore information, see [Verifying updates to email addresses and phone numbers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html#user-pool-settings-verifications-verify-attribute-updates) .",
-    "UserPoolAddOns": "User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to `AUDIT` . To configure automatic security responses to risky traffic to your user pool, set to `ENFORCED` .\n\nFor more information, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) .",
-    "UserPoolName": "A string used to name the user pool.",
+    "UserPoolAddOns": "User pool add-ons. Contains settings for activation of threat protection. To log user security information but take no action, set to `AUDIT` . To configure automatic security responses to risky traffic to your user pool, set to `ENFORCED` .\n\nFor more information, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) . To activate this setting, your user pool must be on the [Plus tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-plus.html) .",
+    "UserPoolName": "A friendly name for your user pool.",
    "UserPoolTags": "The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria.",
    "UserPoolTier": "The user pool [feature plan](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-sign-in-feature-plans.html) , or tier. This parameter determines the eligibility of the user pool for features like managed login, access-token customization, and threat protection. Defaults to `ESSENTIALS` .",
    "UsernameAttributes": "Specifies whether a user can use an email address or phone number as a username when they sign up.",
-    "UsernameConfiguration": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. 
For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) .",
+    "UsernameConfiguration": "Sets the case sensitivity option for sign-in usernames. When `CaseSensitive` is `false` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `false` as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nWhen `CaseSensitive` is `true` (case sensitive), Amazon Cognito interprets `USERNAME` and `UserName` as distinct users.\n\nThis configuration is immutable after you set it.",
    "VerificationMessageTemplate": "The template for the verification message that your user pool delivers to users who set an email address or phone number attribute.\n\nSet the email message type that corresponds to your `DefaultEmailOption` selection. For `CONFIRM_WITH_LINK` , specify an `EmailMessageByLink` and leave `EmailMessage` blank. For `CONFIRM_WITH_CODE` , specify an `EmailMessage` and leave `EmailMessageByLink` blank. When you supply both parameters with either choice, Amazon Cognito returns an error.",
-    "WebAuthnRelyingPartyID": "",
-    "WebAuthnUserVerification": ""
+    "WebAuthnRelyingPartyID": "Sets or displays the authentication domain, typically your user pool domain, that passkey providers must use as a relying party (RP) in their configuration.\n\nUnder the following conditions, the passkey relying party ID must be the fully-qualified domain name of your custom domain:\n\n- The user pool is configured for passkey authentication.\n- The user pool has a custom domain, whether or not it also has a prefix domain.\n- Your application performs authentication with managed login or the classic hosted UI.",
+    "WebAuthnUserVerification": "When `required` , you can only register and sign in users with passkeys that are capable of [user verification](https://docs.aws.amazon.com/https://www.w3.org/TR/webauthn-2/#enum-userVerificationRequirement) . When `preferred` , your user pool doesn't require the use of authenticators with user verification but encourages it."
  },
  "AWS::Cognito::UserPool AccountRecoverySetting": {
    "RecoveryMechanisms": "The list of options and priorities for user message delivery in forgot-password operations. Sets or displays user pool preferences for email or SMS message priority, whether users should fall back to a second delivery method, and whether passwords should only be reset by administrators."
  },
@@ -8482,7 +8646,7 @@
    "UnusedAccountValidityDays": "This parameter is no longer in use. 
Configure the duration of temporary passwords with the `TemporaryPasswordValidityDays` parameter of [PasswordPolicyType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_PasswordPolicyType.html) . For older user pools that have a `UnusedAccountValidityDays` configuration, that value is effective until you set a value for `TemporaryPasswordValidityDays` .\n\nThe password expiration limit in days for administrator-created users. When this time expires, the user can't sign in with their temporary password. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `RESEND` for the `MessageAction` parameter.\n\nThe default value for this parameter is 7." }, "AWS::Cognito::UserPool AdvancedSecurityAdditionalFlows": { - "CustomAuthMode": "" + "CustomAuthMode": "The operating mode of advanced security features in custom authentication with [Custom authentication challenge Lambda triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) ." }, "AWS::Cognito::UserPool CustomEmailSender": { "LambdaArn": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.", @@ -8530,7 +8694,7 @@ }, "AWS::Cognito::UserPool PasswordPolicy": { "MinimumLength": "The minimum length of the password in the policy that you have set. This value can't be less than 6.", - "PasswordHistorySize": "The number of previous passwords that you want Amazon Cognito to restrict each user from reusing. Users can't set a password that matches any of `n` previous passwords, where `n` is the value of `PasswordHistorySize` .\n\nPassword history isn't enforced and isn't displayed in [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) responses when you set this value to `0` or don't provide it. To activate this setting, [advanced security features](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) must be active in your user pool.", + "PasswordHistorySize": "The number of previous passwords that you want Amazon Cognito to restrict each user from reusing. Users can't set a password that matches any of `n` previous passwords, where `n` is the value of `PasswordHistorySize` .\n\nPassword history isn't enforced and isn't displayed in [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) responses when you set this value to `0` or don't provide it. To activate this setting, your user pool must be in the [Essentials tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-essentials.html) or higher.", "RequireLowercase": "The requirement in a password policy that users must include at least one lowercase letter in their password.", "RequireNumbers": "The requirement in a password policy that users must include at least one number in their password.", "RequireSymbols": "The requirement in a password policy that users must include at least one symbol in their password.", @@ -8539,7 +8703,7 @@ }, "AWS::Cognito::UserPool Policies": { "PasswordPolicy": "The password policy settings for a user pool, including complexity, history, and length requirements.", - "SignInPolicy": "" + "SignInPolicy": "The policy for allowed types of authentication in a user pool. 
To activate this setting, your user pool must be in the [Essentials tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-essentials.html) or higher.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) ."
  },
  "AWS::Cognito::UserPool PreTokenGenerationConfig": {
    "LambdaArn": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nThis parameter and the `PreTokenGeneration` property of `LambdaConfig` have the same value. For new instances of pre token generation triggers, set `LambdaArn` .",
@@ -8559,7 +8723,7 @@
    "StringAttributeConstraints": "Specifies the constraints for an attribute of the string type."
  },
  "AWS::Cognito::UserPool SignInPolicy": {
-    "AllowedFirstAuthFactors": ""
+    "AllowedFirstAuthFactors": "The sign-in methods that a user pool supports as the first factor. You can permit users to start authentication with a standard username and password, or with other one-time password and hardware factors.\n\nSupports values of `EMAIL_OTP` , `SMS_OTP` , `WEB_AUTHN` , and `PASSWORD` ."
  },
  "AWS::Cognito::UserPool SmsConfiguration": {
    "ExternalId": "The external ID provides additional security for your IAM role. You can use an `ExternalId` with the IAM role that you use with Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , your Amazon Cognito user pool includes it in the request to assume your IAM role. You can configure the role trust policy to require that Amazon Cognito, and any principal, provide the `ExternalID` . If you use the Amazon Cognito Management Console to create a role for SMS multi-factor authentication (MFA), Amazon Cognito creates a role with the required permissions and a trust policy that demonstrates use of the `ExternalId` .\n\nFor more information about the `ExternalId` of a role, see [How to use an external ID when granting access to your AWS resources to a third party](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) .",
@@ -8574,7 +8738,7 @@
    "AttributesRequireVerificationBeforeUpdate": "Requires that your user verifies their email address, phone number, or both before Amazon Cognito updates the value of that attribute. When you update a user attribute that has this option activated, Amazon Cognito sends a verification message to the new phone number or email address. Amazon Cognito doesn\u2019t change the value of the attribute until your user responds to the verification message and confirms the new value.\n\nYou can verify an updated email address or phone number with a [VerifyUserAttribute](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerifyUserAttribute.html) API request. You can also call the [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) API and set `email_verified` or `phone_number_verified` to true.\n\nWhen `AttributesRequireVerificationBeforeUpdate` is false, your user pool doesn't require that your users verify attribute changes before Amazon Cognito updates them. 
In a user pool where `AttributesRequireVerificationBeforeUpdate` is false, API operations that change attribute values can immediately update a user\u2019s `email` or `phone_number` attribute." }, "AWS::Cognito::UserPool UserPoolAddOns": { - "AdvancedSecurityAdditionalFlows": "", + "AdvancedSecurityAdditionalFlows": "Advanced security configuration options for additional authentication types in your user pool, including custom authentication.", "AdvancedSecurityMode": "The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication." }, "AWS::Cognito::UserPool UsernameConfiguration": { @@ -8592,24 +8756,24 @@ "AccessTokenValidity": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour.", "AllowedOAuthFlows": "The OAuth grant types that you want your app client to generate. To create an app client that generates client credentials grants, you must add `client_credentials` as the only allowed OAuth flow.\n\n- **code** - Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the `/oauth2/token` endpoint.\n- **implicit** - Issue the access token (and, optionally, ID token, based on scopes) directly to your user.\n- **client_credentials** - Issue the access token from the `/oauth2/token` endpoint directly to a non-person user using a combination of the client ID and client secret.", "AllowedOAuthFlowsUserPoolClient": "Set to `true` to use OAuth 2.0 features in your user pool app client.\n\n`AllowedOAuthFlowsUserPoolClient` must be `true` before you can configure the following features in your app client.\n\n- `CallBackURLs` : Callback URLs.\n- `LogoutURLs` : Sign-out redirect URLs.\n- `AllowedOAuthScopes` : OAuth 2.0 scopes.\n- `AllowedOAuthFlows` : Support for authorization code, implicit, and client credentials OAuth 2.0 grants.\n\nTo use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set `AllowedOAuthFlowsUserPoolClient` to `true` in a `CreateUserPoolClient` or `UpdateUserPoolClient` API request. If you don't set a value for `AllowedOAuthFlowsUserPoolClient` in a request with the AWS CLI or SDKs, it defaults to `false` .", - "AllowedOAuthScopes": "The allowed OAuth scopes. Possible values provided by OAuth are `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.", - "AnalyticsConfiguration": "The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.\n\n> In AWS Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in AWS Region us-east-1. 
In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.", + "AllowedOAuthScopes": "The OAuth 2.0 scopes that you want to permit your app client to authorize. Scopes govern access control to user pool self-service API operations, user data from the `userInfo` endpoint, and third-party APIs. Possible values provided by OAuth are `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.", + "AnalyticsConfiguration": "The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.\n\nIn AWS Regions where Amazon Pinpoint isn't available, user pools might not have access to analytics or might be configurable with campaigns in the US East (N. Virginia) Region. For more information, see [Using Amazon Pinpoint analytics](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-pinpoint-integration.html) .", "AuthSessionValidity": "Amazon Cognito creates a session token for each API request in an authentication flow. `AuthSessionValidity` is the duration, in minutes, of that session token. Your user pool native user must respond to each authentication challenge before the session expires.", - "CallbackURLs": "A list of allowed redirect (callback) URLs for the IdPs.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", - "ClientName": "The client name for the user pool client you would like to create.", - "DefaultRedirectURI": "The default redirect URI. In app clients with one assigned IdP, replaces `redirect_uri` in authentication requests. Must be in the `CallbackURLs` list.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nFor more information, see [Default redirect URI](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html#cognito-user-pools-app-idp-settings-about) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", + "CallbackURLs": "A list of allowed redirect (callback) URLs for the IdPs.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server. Amazon Cognito doesn't accept authorization requests with `redirect_uri` values that aren't in the list of `CallbackURLs` that you provide in this parameter.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", + "ClientName": "A friendly name for the app client that you want to create.", + "DefaultRedirectURI": "The default redirect URI. In app clients with one assigned IdP, replaces `redirect_uri` in authentication requests. 
Must be in the `CallbackURLs` list.", "EnablePropagateAdditionalUserContextData": "Activates the propagation of additional user context data. For more information about propagation of user context data, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-threat-protection.html) . If you don\u2019t include this parameter, you can't send device fingerprint information, including source IP address, to Amazon Cognito advanced security. You can only activate `EnablePropagateAdditionalUserContextData` in an app client that has a client secret.", "EnableTokenRevocation": "Activates or deactivates token revocation. For more information about revoking tokens, see [RevokeToken](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_RevokeToken.html) .\n\nIf you don't include this parameter, token revocation is automatically activated for the new user pool client.", - "ExplicitAuthFlows": "The authentication flows that you want your user pool client to support. For each app client in your user pool, you can sign in your users with any combination of one or more flows, including with a user name and Secure Remote Password (SRP), a user name and password, or a custom authentication process that you define with Lambda functions.\n\n> If you don't specify a value for `ExplicitAuthFlows` , your user client supports `ALLOW_REFRESH_TOKEN_AUTH` , `ALLOW_USER_SRP_AUTH` , and `ALLOW_CUSTOM_AUTH` . \n\nValid values include:\n\n- `ALLOW_USER_AUTH` : Enable selection-based sign-in with `USER_AUTH` . This setting covers username-password, secure remote password (SRP), passwordless, and passkey authentication. This authentiation flow can do username-password and SRP authentication without other `ExplicitAuthFlows` permitting them. For example users can complete an SRP challenge through `USER_AUTH` without the flow `USER_SRP_AUTH` being active for the app client. This flow doesn't include `CUSTOM_AUTH` .\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, your app passes a user name and password to Amazon Cognito in the request, instead of using the Secure Remote Password (SRP) protocol to securely transmit the password.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.\n\nIn some environments, you will see the values `ADMIN_NO_SRP_AUTH` , `CUSTOM_AUTH_FLOW_ONLY` , or `USER_PASSWORD_AUTH` . You can't assign these legacy `ExplicitAuthFlows` values to user pool clients at the same time as values that begin with `ALLOW_` ,\nlike `ALLOW_USER_SRP_AUTH` .", - "GenerateSecret": "Boolean to specify whether you want to generate a secret for the user pool client being created.", + "ExplicitAuthFlows": "The authentication flows that you want your user pool client to support. 
For each app client in your user pool, you can sign in your users with any combination of one or more flows, including with a user name and Secure Remote Password (SRP), a user name and password, or a custom authentication process that you define with Lambda functions.\n\n> If you don't specify a value for `ExplicitAuthFlows` , your user client supports `ALLOW_REFRESH_TOKEN_AUTH` , `ALLOW_USER_SRP_AUTH` , and `ALLOW_CUSTOM_AUTH` . \n\nValid values include:\n\n- `ALLOW_USER_AUTH` : Enable selection-based sign-in with `USER_AUTH` . This setting covers username-password, secure remote password (SRP), passwordless, and passkey authentication. This authentication flow can do username-password and SRP authentication without other `ExplicitAuthFlows` permitting them. For example, users can complete an SRP challenge through `USER_AUTH` without the flow `USER_SRP_AUTH` being active for the app client. This flow doesn't include `CUSTOM_AUTH` .\n\nTo activate this setting, your user pool must be in the [Essentials tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-essentials.html) or higher.\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin-based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, your app passes a user name and password to Amazon Cognito in the request, instead of using the Secure Remote Password (SRP) protocol to securely transmit the password.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger-based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.\n\nIn some environments, you will see the values `ADMIN_NO_SRP_AUTH` , `CUSTOM_AUTH_FLOW_ONLY` , or `USER_PASSWORD_AUTH` . You can't assign these legacy `ExplicitAuthFlows` values to user pool clients at the same time as values that begin with `ALLOW_` ,\nlike `ALLOW_USER_SRP_AUTH` .", + "GenerateSecret": "When `true` , generates a client secret for the app client. Client secrets are used with server-side and machine-to-machine applications. For more information, see [App client types](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html#user-pool-settings-client-app-client-types) .", "IdTokenValidity": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour.", - "LogoutURLs": "A list of allowed logout URLs for the IdPs.", + "LogoutURLs": "A list of allowed logout URLs for managed login authentication. 
For more information, see [Logout endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/logout-endpoint.html) .", "PreventUserExistenceErrors": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value.", "ReadAttributes": "The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", "RefreshTokenValidity": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days.", - "SupportedIdentityProviders": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .\n\nThis setting applies to providers that you can access with the [hosted UI and OAuth 2.0 authorization server](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-app-integration.html) . The removal of `COGNITO` from this list doesn't prevent authentication operations for local users with the user pools API in an AWS SDK. 
The only way to prevent API-based authentication is to block access with a [AWS WAF rule](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-waf.html) .", - "TokenValidityUnits": "The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours.", - "UserPoolId": "The user pool ID for the user pool where you want to create a user pool client.", + "SupportedIdentityProviders": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .\n\nThis setting applies to providers that you can access with [managed login](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-managed-login.html) . The removal of `COGNITO` from this list doesn't prevent authentication operations for local users with the user pools API in an AWS SDK. The only way to prevent API-based authentication is to block access with an [AWS WAF rule](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-waf.html) .", + "TokenValidityUnits": "The units that validity times are represented in. The default unit for refresh tokens is days, and the defaults for ID and access tokens are hours.", + "UserPoolId": "The ID of the user pool where you want to create an app client.", "WriteAttributes": "The list of user attributes that you want your app client to have write access to. After your user authenticates in your app, their access token authorizes them to set or modify their own attribute value for any attribute in this list. An example of this kind of activity is when you present your user with a form to update their profile information and they change their last name. Your app then makes an [UpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserAttributes.html) API request and sets `family_name` to the new value.\n\nWhen you don't specify the `WriteAttributes` for your app client, your app can write the values of the Standard attributes of your user pool. When your user pool has write access to these default attributes, `WriteAttributes` doesn't return any information. Amazon Cognito only populates `WriteAttributes` in the API response if you have specified your own custom set of write attributes.\n\nIf your app client allows users to sign in through an IdP, this array must include all attributes that you have mapped to IdP attributes. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If your app client does not have write access to a mapped attribute, Amazon Cognito throws an error when it tries to update the attribute. For more information, see [Specifying IdP Attribute Mappings for Your user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html) ." }, "AWS::Cognito::UserPoolClient AnalyticsConfiguration": { @@ -8627,31 +8791,32 @@ "AWS::Cognito::UserPoolDomain": { "CustomDomainConfig": "The configuration for a custom domain that hosts the sign-up and sign-in pages for your application. Use this object to specify an SSL certificate that is managed by ACM.\n\nWhen you create a custom domain, the passkey RP ID defaults to the custom domain. 
If you have a prefix domain active, this causes passkey integration for your prefix domain to stop working due to a mismatch in RP ID. To keep the prefix domain passkey integration working, you can explicitly set RP ID to the prefix domain. Update the RP ID in a [SetUserPoolMfaConfig](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SetUserPoolMfaConfig.html) request.", "Domain": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", + "ManagedLoginVersion": "A version number that indicates the state of managed login for your domain. Version `1` is hosted UI (classic). Version `2` is the newer managed login with the branding designer. For more information, see [Managed login](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-managed-login.html) .", "UserPoolId": "The ID of the user pool that is associated with the custom domain whose certificate you're updating." }, "AWS::Cognito::UserPoolDomain CustomDomainConfigType": { "CertificateArn": "The Amazon Resource Name (ARN) of an AWS Certificate Manager SSL certificate. You use this certificate for the subdomain of your custom domain." }, "AWS::Cognito::UserPoolGroup": { - "Description": "A string containing the description of the group.", - "GroupName": "The name of the group. Must be unique.", + "Description": "A description of the group that you're creating.", + "GroupName": "A name for the group. This name must be unique in your user pool.", "Precedence": "A non-negative integer value that specifies the precedence of this group relative to the other groups that a user can belong to in the user pool. Zero is the highest precedence value. Groups with lower `Precedence` values take precedence over groups with higher or null `Precedence` values. If a user belongs to two or more groups, it is the group with the lowest precedence value whose role ARN is given in the user's tokens for the `cognito:roles` and `cognito:preferred_role` claims.\n\nTwo groups can have the same `Precedence` value. If this happens, neither group takes precedence over the other. If two groups with the same `Precedence` have the same role ARN, that role is used in the `cognito:preferred_role` claim in tokens for users in each group. If the two groups have different role ARNs, the `cognito:preferred_role` claim isn't set in users' tokens.\n\nThe default `Precedence` value is null. The maximum `Precedence` value is `2^31-1` .", - "RoleArn": "The role Amazon Resource Name (ARN) for the group.", - "UserPoolId": "The user pool ID for the user pool." + "RoleArn": "The Amazon Resource Name (ARN) for the IAM role that you want to associate with the group. A group role primarily declares a preferred role for the credentials that you get from an identity pool. Amazon Cognito ID tokens have a `cognito:preferred_role` claim that presents the highest-precedence group that a user belongs to. Both ID and access tokens also contain a `cognito:groups` claim that lists all the groups that a user is a member of.", + "UserPoolId": "The ID of the user pool where you want to create a user group."
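
To make the `Precedence` and `RoleArn` semantics above concrete, here is a minimal CloudFormation fragment for `AWS::Cognito::UserPoolGroup` , offered as an illustrative sketch only: the logical IDs, account ID, role name, and the `MyUserPool` reference are hypothetical placeholders. The group with the lowest `Precedence` value is the one whose role appears in the `cognito:preferred_role` claim.

    {
      "AdminsGroup": {
        "Type": "AWS::Cognito::UserPoolGroup",
        "Properties": {
          "GroupName": "Admins",
          "Description": "Administrators with the highest-precedence group role",
          "Precedence": 0,
          "RoleArn": "arn:aws:iam::123456789012:role/AdminsGroupRole",
          "UserPoolId": { "Ref": "MyUserPool" }
        }
      }
    }
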
}, "AWS::Cognito::UserPoolIdentityProvider": { - "AttributeMapping": "A mapping of IdP attributes to standard and custom user pool attributes.", - "IdpIdentifiers": "A list of IdP identifiers.", + "AttributeMapping": "A mapping of IdP attributes to standard and custom user pool attributes. Specify a user pool attribute as the key of the key-value pair, and the IdP attribute claim name as the value.", + "IdpIdentifiers": "An array of IdP identifiers, for example `\"IdPIdentifiers\": [ \"MyIdP\", \"MyIdP2\" ]` . Identifiers are friendly names that you can pass in the `idp_identifier` query parameter of requests to the [Authorize endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/authorization-endpoint.html) to silently redirect to sign-in with the associated IdP. Identifiers in a domain format also enable the use of [email-address matching with SAML providers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-managing-saml-idp-naming.html) .", "ProviderDetails": "The scopes, URLs, and identifiers for your external identity provider. The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", 
\"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", - "ProviderName": "The IdP name.", - "ProviderType": "The IdP type.", - "UserPoolId": "The user pool ID." + "ProviderName": "The name that you want to assign to the IdP. You can pass the identity provider name in the `identity_provider` query parameter of requests to the [Authorize endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/authorization-endpoint.html) to silently redirect to sign-in with the associated IdP.", + "ProviderType": "The type of IdP that you want to add. 
Amazon Cognito supports OIDC, SAML 2.0, Login With Amazon, Sign In With Apple, Google, and Facebook IdPs.", + "UserPoolId": "The ID of the user pool where you want to create an IdP." }, "AWS::Cognito::UserPoolResourceServer": { "Identifier": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens.", "Name": "A friendly name for the resource server.", "Scopes": "A list of scopes. Each scope is a map with keys `ScopeName` and `ScopeDescription` .", - "UserPoolId": "The user pool ID for the user pool." + "UserPoolId": "The ID of the user pool where you want to create a resource server." }, "AWS::Cognito::UserPoolResourceServer ResourceServerScopeType": { "ScopeDescription": "A friendly description of a custom scope.", @@ -8704,15 +8869,15 @@ "AWS::Cognito::UserPoolUICustomizationAttachment": { "CSS": "The CSS values in the UI customization.", "ClientId": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings.", - "UserPoolId": "The user pool ID for the user pool." + "UserPoolId": "The ID of the user pool." }, "AWS::Cognito::UserPoolUser": { - "ClientMetadata": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing user pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:\n> \n> - Store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.\n> - Validate the ClientMetadata value.\n> - Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.", - "DesiredDeliveryMediums": "Specify `\"EMAIL\"` if email will be used to send the welcome message. Specify `\"SMS\"` if the phone number will be used. The default value is `\"SMS\"` . You can specify more than one value.", - "ForceAliasCreation": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . 
Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", - "MessageAction": "Set to `RESEND` to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to `SUPPRESS` to suppress sending the message. You can specify only one value.", + "ClientMetadata": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `ClientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Using Lambda triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the `ClientMetadata` parameter, note that Amazon Cognito won't do the following:\n> \n> - Store the `ClientMetadata` value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the `ClientMetadata` parameter serves no purpose.\n> - Validate the `ClientMetadata` value.\n> - Encrypt the `ClientMetadata` value. Don't send sensitive information in this parameter.", + "DesiredDeliveryMediums": "Specify `EMAIL` if email will be used to send the welcome message. Specify `SMS` if the phone number will be used. The default value is `SMS` . You can specify more than one value.", + "ForceAliasCreation": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the `UserAttributes` parameter already exists as an alias with a different user, this request migrates the alias from the previous user to the newly-created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", + "MessageAction": "Set to `RESEND` to resend the invitation message to a user that already exists, and to reset the temporary-password duration with a new temporary password. Set to `SUPPRESS` to suppress sending the message. You can specify only one value.", "UserAttributes": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than `Username` . 
However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) must be supplied either by you (in your call to `AdminCreateUser` ) or by the user (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nYou must also provide an email address or phone number when you expect the user to do passwordless sign-in with an email or SMS OTP. These attributes must be provided when passwordless options are the only ones available, or when you don't submit a `TemporaryPassword` .\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent. Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter.", - "UserPoolId": "The user pool ID for the user pool where the user will be created.", + "UserPoolId": "The ID of the user pool where you want to create a user.", "Username": "The value that you want to set as the username sign-in attribute. The following conditions apply to the username parameter.\n\n- The username can't be a duplicate of another username in the same user pool.\n- You can't change the value of a username after you create it.\n- You can only provide a value if usernames are a valid sign-in attribute for your user pool. If your user pool only supports phone numbers or email addresses as sign-in attributes, Amazon Cognito automatically generates a username value. For more information, see [Customizing sign-in attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases) .", "ValidationData": "Temporary user attributes that contribute to the outcomes of your pre sign-up Lambda trigger. This set of key-value pairs is for custom validation of information that you collect from your users but don't need to retain.\n\nYour Lambda function can analyze this additional data and act on it. Your function might perform external API operations like logging user attributes and validation data to Amazon CloudWatch Logs. Validation data might also affect the response that your function returns to Amazon Cognito, like automatically confirming the user if they sign up from within your network.\n\nFor more information about the pre sign-up Lambda trigger, see [Pre sign-up Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-sign-up.html) ."
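
For illustration, a minimal `AWS::Cognito::UserPoolUser` fragment that invites a user by email, sketched under the assumption of a hypothetical `MyUserPool` resource and placeholder username and address; `email` and `email_verified` are supplied per the `UserAttributes` notes above so the invitation can be delivered:

    {
      "InvitedUser": {
        "Type": "AWS::Cognito::UserPoolUser",
        "Properties": {
          "UserPoolId": { "Ref": "MyUserPool" },
          "Username": "jdoe",
          "DesiredDeliveryMediums": [ "EMAIL" ],
          "UserAttributes": [
            { "Name": "email", "Value": "jdoe@example.com" },
            { "Name": "email_verified", "Value": "True" }
          ]
        }
      }
    }
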
}, @@ -8722,7 +8887,7 @@ }, "AWS::Cognito::UserPoolUserToGroupAttachment": { "GroupName": "The name of the group that you want to add your user to.", - "UserPoolId": "The user pool ID for the user pool.", + "UserPoolId": "The ID of the user pool that contains the group that you want to add the user to.", "Username": "The user's username." }, "AWS::Comprehend::DocumentClassifier": { @@ -9072,11 +9237,11 @@ "DisplayName": "The display name of the email address.", "EmailAddress": "The email address associated with the instance, in [^\\s@]+@[^\\s@]+\\.[^\\s@]+ format.", "InstanceArn": "The Amazon Resource Name (ARN) of the instance.", - "Tags": "" + "Tags": "An array of key-value pairs to apply to this resource." }, "AWS::Connect::EmailAddress Tag": { - "Key": "", - "Value": "" + "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", + "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" }, "AWS::Connect::EvaluationForm": { "Description": "The description of the evaluation form.\n\n*Length Constraints* : Minimum length of 0. Maximum length of 1024.", @@ -9178,25 +9343,25 @@ "StartTime": "The start time that your contact center opens." }, "AWS::Connect::HoursOfOperation HoursOfOperationOverride": { - "EffectiveFrom": "", - "EffectiveTill": "", - "HoursOfOperationOverrideId": "", + "EffectiveFrom": "The date from which the hours of operation override would be effective.", + "EffectiveTill": "The date until which the hours of operation override would be effective.", + "HoursOfOperationOverrideId": "The identifier for the hours of operation override.", "OverrideConfig": "", "OverrideDescription": "", "OverrideName": "" }, "AWS::Connect::HoursOfOperation HoursOfOperationOverrideConfig": { - "Day": "", - "EndTime": "", - "StartTime": "" + "Day": "The day that the hours of operation override applies to.", + "EndTime": "The end time that your contact center closes if overrides are applied.", + "StartTime": "The start time when your contact center opens if overrides are applied." }, "AWS::Connect::HoursOfOperation HoursOfOperationTimeSlice": { "Hours": "The hours.", "Minutes": "The minutes." }, "AWS::Connect::HoursOfOperation OverrideTimeSlice": { - "Hours": "", - "Minutes": "" + "Hours": "The hours.", + "Minutes": "The minutes." }, "AWS::Connect::HoursOfOperation Tag": { "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" }, @@ -9260,7 +9425,7 @@ "CountryCode": "The ISO country code.", "Description": "The description of the phone number.", "Prefix": "The prefix of the phone number. If provided, it must contain `+` as part of the country code.\n\n*Pattern* : `^\\\\+[0-9]{1,15}`", - "SourcePhoneNumberArn": "The claimed phone number ARN that was previously imported from the external service, such as Amazon Pinpoint. 
If it is from Amazon Pinpoint, it looks like the ARN of the phone number that was imported from Amazon Pinpoint.", + "SourcePhoneNumberArn": "The claimed phone number ARN that was previously imported from the external service, such as AWS End User Messaging. If it is from AWS End User Messaging, it looks like the ARN of the phone number that was imported from AWS End User Messaging.", "Tags": "The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.", "TargetArn": "The Amazon Resource Name (ARN) for Amazon Connect instances or traffic distribution group that phone numbers are claimed to.", "Type": "The type of phone number." @@ -9295,6 +9460,7 @@ "MaxContacts": "The maximum number of contacts that can be in the queue before it is considered full.", "Name": "The name of the queue.", "OutboundCallerConfig": "The outbound caller ID name, number, and outbound whisper flow.", + "OutboundEmailConfig": "The outbound email address ID for a specified queue.", "QuickConnectArns": "The Amazon Resource Names (ARNs) of the quick connects available to agents who are working the queue.", "Status": "The status of the queue.", "Tags": "The tags used to organize, track, or control access for this resource. For example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }." @@ -9304,6 +9470,9 @@ "OutboundCallerIdNumberArn": "The Amazon Resource Name (ARN) of the outbound caller ID number.\n\n> Only use the phone number ARN format that doesn't contain `instance` in the path, for example, `arn:aws:connect:us-east-1:1234567890:phone-number/uuid` . This is the same ARN format that is returned when you create a phone number using CloudFormation , or when you call the [ListPhoneNumbersV2](https://docs.aws.amazon.com/connect/latest/APIReference/API_ListPhoneNumbersV2.html) API.", "OutboundFlowArn": "The Amazon Resource Name (ARN) of the outbound flow." }, + "AWS::Connect::Queue OutboundEmailConfig": { + "OutboundEmailAddressId": "The identifier of the email address." + }, "AWS::Connect::Queue Tag": { "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" }, @@ -9471,6 +9640,7 @@ "Fields": "Fields that are part of the template. A template requires at least one field that has type `Name` .", "InstanceArn": "The Amazon Resource Name (ARN) of the Amazon Connect instance.", "Name": "The name of the task template.", + "SelfAssignContactFlowArn": "The Amazon Resource Name (ARN) of the flow.", "Status": "The status of the task template.", "Tags": "The tags used to organize, track, or control access for this resource." }, @@ -9534,8 +9704,8 @@ }, "AWS::Connect::User UserIdentityInfo": { "Email": "The email address. If you are using SAML for identity management and include this parameter, an error is returned.", - "FirstName": "The first name. This is required if you are using Amazon Connect or SAML for identity management.", - "LastName": "The last name. This is required if you are using Amazon Connect or SAML for identity management.", + "FirstName": "The first name. 
This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) isn't accepted.", + "LastName": "The last name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) isn't accepted.", "Mobile": "The user's mobile number.", "SecondaryEmail": "The user's secondary email address. If you provide a secondary email, the user receives email notifications -- other than password reset notifications -- to this email address instead of to their primary email address.\n\n*Pattern* : `(?=^.{0,265}$)[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,63}`" }, @@ -9704,6 +9874,9 @@ "AWS::ConnectCampaignsV2::Campaign EmailOutboundMode": { "AgentlessConfig": "The agentless outbound mode configuration for email." }, + "AWS::ConnectCampaignsV2::Campaign EventTrigger": { + "CustomerProfilesDomainArn": "The Amazon Resource Name (ARN) of the Customer Profiles domain." + }, "AWS::ConnectCampaignsV2::Campaign LocalTimeZoneConfig": { "DefaultTimeZone": "The timezone to use for all recipients.", "LocalTimeZoneDetection": "Detects methods for the recipient's timezone." }, @@ -9743,7 +9916,8 @@ "AgentlessConfig": "Contains agentless outbound mode configuration." }, "AWS::ConnectCampaignsV2::Campaign Source": { - "CustomerProfilesSegmentArn": "The Amazon Resource Name (ARN) of the Customer Profiles segment." + "CustomerProfilesSegmentArn": "The Amazon Resource Name (ARN) of the Customer Profiles segment.", + "EventTrigger": "The event trigger of the campaign." }, "AWS::ConnectCampaignsV2::Campaign Tag": { "Key": "The tag keys.", "Value": "The tag values." }, @@ -9929,7 +10103,7 @@ }, "AWS::CustomerProfiles::Integration": { "DomainName": "The unique name of the domain.", - "EventTriggerNames": "", + "EventTriggerNames": "A list of unique names for active event triggers associated with the integration.", "FlowDefinition": "The configuration that controls how Customer Profiles retrieves data from the source.", "ObjectTypeName": "The name of the profile object type mapping to use.", "ObjectTypeNames": "The object type mapping.", @@ -10200,7 +10374,7 @@ "CronExpression": "The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see the [Cron expressions reference](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) in the *Amazon EventBridge User Guide* .", "Interval": "The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, 12, and 24.", "IntervalUnit": "The interval unit.", - "Location": "*[Custom snapshot policies only]* Specifies the destination for snapshots created by the policy. 
The allowed destinations depend on the location of the targeted resources.\n\n- If the policy targets resources in a Region, then you must create snapshots in the same Region as the source resource.\n- If the policy targets resources in a Local Zone, you can create snapshots in the same Local Zone or in its parent Region.\n- If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost or in its parent Region.\n\nSpecify one of the following values:\n\n- To create snapshots in the same Region as the source resource, specify `CLOUD` .\n- To create snapshots in the same Local Zone as the source resource, specify `LOCAL_ZONE` .\n- To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL` .\n\nDefault: `CLOUD`", "Scripts": "*[Custom snapshot policies that target instances only]* Specifies pre and/or post scripts for a snapshot lifecycle policy that targets instances. This is useful for creating application-consistent snapshots, or for performing specific administrative tasks before or after Amazon Data Lifecycle Manager initiates snapshot creation.\n\nFor more information, see [Automating application-consistent snapshots with pre and post scripts](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/automate-app-consistent-backups.html) .", "Times": "The time, in UTC, to start the operation. The supported format is hh:mm.\n\nThe operation occurs within a one-hour window following the specified time. If you do not specify a time, Amazon Data Lifecycle Manager selects a time within the next 24 hours." }, @@ -10271,7 +10445,7 @@ "Parameters": "*[Custom snapshot and AMI policies only]* A set of optional parameters for snapshot and AMI lifecycle policies.\n\n> If you are modifying a policy that was created or previously modified using the Amazon Data Lifecycle Manager console, then you must include this parameter and specify either the default values or the new values that you require. You can't omit this parameter or set its values to null.", "PolicyLanguage": "The type of policy to create. Specify one of the following:\n\n- `SIMPLIFIED` To create a default policy.\n- `STANDARD` To create a custom policy.", "PolicyType": "The type of policy. Specify `EBS_SNAPSHOT_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify `IMAGE_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify `EVENT_BASED_POLICY` to create an event-based policy that performs specific actions when a defined event occurs in your AWS account .\n\nThe default is `EBS_SNAPSHOT_MANAGEMENT` .", - "ResourceLocations": "*[Custom snapshot and AMI policies only]* The location of the resources to backup. If the source resources are located in an AWS Region , specify `CLOUD` . If the source resources are located on an Outpost in your account, specify `OUTPOST` .\n\nIf you specify `OUTPOST` , Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account.", + "ResourceLocations": "*[Custom snapshot and AMI policies only]* The location of the resources to backup.\n\n- If the source resources are located in a Region, specify `CLOUD` . In this case, the policy targets all resources of the specified type with matching target tags across all Availability Zones in the Region.\n- *[Custom snapshot policies only]* If the source resources are located in a Local Zone, specify `LOCAL_ZONE` . 
In this case, the policy targets all resources of the specified type with matching target tags across all Local Zones in the Region.\n- If the source resources are located on an Outpost in your account, specify `OUTPOST` . In this case, the policy targets all resources of the specified type with matching target tags across all of the Outposts in your account.", "ResourceType": "*[Default policies only]* Specify the type of default policy to create.\n\n- To create a default policy for EBS snapshots, that creates snapshots of all volumes in the Region that do not have recent backups, specify `VOLUME` .\n- To create a default policy for EBS-backed AMIs, that creates EBS-backed AMIs from all instances in the Region that do not have recent backups, specify `INSTANCE` .", "ResourceTypes": "*[Custom snapshot policies only]* The target resource type for snapshot and AMI lifecycle policies. Use `VOLUME` to create snapshots of individual volumes or use `INSTANCE` to create multi-volume snapshots from the volumes for an instance.", "RetainInterval": "*[Default policies only]* Specifies how long the policy should retain snapshots or AMIs before deleting them. The retention period can range from 2 to 14 days, but it must be greater than the creation frequency to ensure that the policy retains at least 1 snapshot or AMI at any given time. If you do not specify a value, the default is 7.\n\nDefault: 7", @@ -10292,7 +10466,7 @@ "ArchiveRule": "*[Custom snapshot policies that target volumes only]* The snapshot archiving rule for the schedule. When you specify an archiving rule, snapshots are automatically moved from the standard tier to the archive tier once the schedule's retention threshold is met. Snapshots are then retained in the archive tier for the archive retention period that you specify.\n\nFor more information about using snapshot archiving, see [Considerations for snapshot lifecycle policies](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshot-ami-policy.html#dlm-archive) .", "CopyTags": "Copy all user-defined tags on a source volume to snapshots of the volume created by this policy.", "CreateRule": "The creation rule.", - "CrossRegionCopyRules": "Specifies a rule for copying snapshots or AMIs across regions.\n\n> You can't specify cross-Region copy rules for policies that create snapshots on an Outpost. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.", + "CrossRegionCopyRules": "Specifies a rule for copying snapshots or AMIs across Regions.\n\n> You can't specify cross-Region copy rules for policies that create snapshots on an Outpost or in a Local Zone. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.", "DeprecateRule": "*[Custom AMI policies only]* The AMI deprecation rule for the schedule.", "FastRestoreRule": "*[Custom snapshot policies only]* The rule for enabling fast snapshot restore.", "Name": "The name of the schedule.", @@ -10844,6 +11018,7 @@ "Input": "Information on how DataBrew can find the dataset, in either the AWS Glue Data Catalog or Amazon S3 .", "Name": "The unique name of the dataset.", "PathOptions": "A set of options that defines how DataBrew interprets an Amazon S3 path of the dataset.", + "Source": "The location of the data for the dataset, either Amazon S3 or the AWS Glue Data Catalog .", "Tags": "Metadata tags that have been applied to the dataset." 
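
For illustration, a minimal `AWS::DataBrew::Dataset` fragment with an Amazon S3 input, sketched with a placeholder logical ID, bucket, and key; only the required `Name` and `Input` properties are shown:

    {
      "SalesDataset": {
        "Type": "AWS::DataBrew::Dataset",
        "Properties": {
          "Name": "sales-dataset",
          "Input": {
            "S3InputDefinition": {
              "Bucket": "amzn-s3-demo-bucket",
              "Key": "sales/records.csv"
            }
          }
        }
      }
    }
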
}, "AWS::DataBrew::Dataset CsvOptions": { @@ -10920,6 +11095,7 @@ }, "AWS::DataBrew::Dataset S3Location": { "Bucket": "The Amazon S3 bucket name.", + "BucketOwner": "The AWS account ID of the bucket owner.", "Key": "The unique name of the object in the bucket." }, "AWS::DataBrew::Dataset Tag": { @@ -11313,7 +11489,7 @@ "EfsFilesystemArn": "Specifies the ARN for your Amazon EFS file system.", "FileSystemAccessRoleArn": "Specifies an AWS Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.\n\nFor information on creating this role, see [Creating a DataSync IAM role for file system access](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam-role) .", "InTransitEncryption": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", - "Subdirectory": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", + "Subdirectory": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location).\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "Tags": "Specifies the key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location." }, "AWS::DataSync::LocationEFS Ec2Config": { @@ -11325,10 +11501,10 @@ "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationFSxLustre": { - "FsxFilesystemArn": "The Amazon Resource Name (ARN) for the FSx for Lustre file system.", + "FsxFilesystemArn": "Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system.", "SecurityGroupArns": "The ARNs of the security groups that are used to configure the FSx for Lustre file system.\n\n*Pattern* : `^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):ec2:[a-z\\-0-9]*:[0-9]{12}:security-group/.*$`\n\n*Length constraints* : Maximum length of 128.", - "Subdirectory": "A subdirectory in the location's path. This subdirectory in the FSx for Lustre file system is used to read data from the FSx for Lustre source location or write data to the FSx for Lustre destination.", - "Tags": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location." + "Subdirectory": "Specifies a mount path for your FSx for Lustre file system. 
The path can include subdirectories.\n\nWhen the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory ( `/` ).", + "Tags": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location." }, "AWS::DataSync::LocationFSxLustre Tag": { "Key": "The key for an AWS resource tag.", @@ -11338,7 +11514,7 @@ "Protocol": "Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system.", "SecurityGroupArns": "Specifies the Amazon Resource Names (ARNs) of the security groups that DataSync can use to access your FSx for ONTAP file system. You must configure the security groups to allow outbound traffic on the following ports (depending on the protocol that you're using):\n\n- *Network File System (NFS)* : TCP ports 111, 635, and 2049\n- *Server Message Block (SMB)* : TCP port 445\n\nYour file system's security groups must also allow inbound traffic on the same port.", "StorageVirtualMachineArn": "Specifies the ARN of the storage virtual machine (SVM) in your file system where you want to copy data to or from.", - "Subdirectory": "Specifies a path to the file share in the SVM where you'll copy your data.\n\nYou can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be `/vol1` , `/vol1/tree1` , or `/share1` .\n\n> Don't specify a junction path in the SVM's root volume. For more information, see [Managing FSx for ONTAP storage virtual machines](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html) in the *Amazon FSx for NetApp ONTAP User Guide* .", + "Subdirectory": "Specifies a path to the file share in the SVM where you want to transfer data to or from.\n\nYou can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be `/vol1` , `/vol1/tree1` , or `/share1` .\n\n> Don't specify a junction path in the SVM's root volume. For more information, see [Managing FSx for ONTAP storage virtual machines](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html) in the *Amazon FSx for NetApp ONTAP User Guide* .", "Tags": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location." }, "AWS::DataSync::LocationFSxONTAP NFS": { @@ -11352,7 +11528,7 @@ "SMB": "Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your FSx for ONTAP file system's SVM." 
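
A minimal `AWS::DataSync::LocationFSxLustre` fragment matching the `Subdirectory` behavior described above; the logical ID, file system ARN, and security group ARN are placeholder values. If `Subdirectory` is omitted, DataSync uses the file system's root directory ( `/` ):

    {
      "LustreLocation": {
        "Type": "AWS::DataSync::LocationFSxLustre",
        "Properties": {
          "FsxFilesystemArn": "arn:aws:fsx:us-east-1:111122223333:file-system/fs-0123456789abcdef0",
          "SecurityGroupArns": [ "arn:aws:ec2:us-east-1:111122223333:security-group/sg-0123456789abcdef0" ],
          "Subdirectory": "/exported/path"
        }
      }
    }
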
}, "AWS::DataSync::LocationFSxONTAP SMB": { - "Domain": "Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to.\n\nIf you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM.", + "Domain": "Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.", "MountOptions": "Specifies how DataSync can access a location using the SMB protocol.", "Password": "Specifies the password of a user who has permission to access your SVM.", "User": "Specifies a user name that can mount the location and access the files, folders, and metadata that you need in the SVM.\n\nIf you provide a user in your Active Directory, note the following:\n\n- If you're using AWS Directory Service for Microsoft Active Directory , the user must be a member of the AWS Delegated FSx Administrators group.\n- If you're using a self-managed Active Directory, the user must be a member of either the Domain Admins group or a custom group that you specified for file system administration when you created your file system.\n\nMake sure that the user has the permissions it needs to copy the data you want:\n\n- `SE_TCB_NAME` : Required to set object ownership and file metadata. With this privilege, you also can copy NTFS discretionary access lists (DACLs).\n- `SE_SECURITY_NAME` : May be needed to copy NTFS system access control lists (SACLs). This operation specifically requires the Windows privilege, which is granted to members of the Domain Admins group. If you configure your task to copy SACLs, make sure that the user has the required privileges. For information about copying SACLs, see [Ownership and permissions-related options](https://docs.aws.amazon.com/datasync/latest/userguide/create-task.html#configure-ownership-and-permissions) ." @@ -11385,7 +11561,7 @@ "Value": "The value for an AWS resource tag." 
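
A minimal `AWS::DataSync::LocationFSxONTAP` fragment using the SMB protocol settings described above, offered as a sketch: all ARNs, the domain, and the user are placeholders, and resolving the password from Secrets Manager is one possible approach rather than a requirement:

    {
      "OntapLocation": {
        "Type": "AWS::DataSync::LocationFSxONTAP",
        "Properties": {
          "StorageVirtualMachineArn": "arn:aws:fsx:us-east-1:111122223333:storage-virtual-machine/fs-0123456789abcdef0/svm-0123456789abcdef0",
          "SecurityGroupArns": [ "arn:aws:ec2:us-east-1:111122223333:security-group/sg-0123456789abcdef0" ],
          "Subdirectory": "/vol1/tree1",
          "Protocol": {
            "SMB": {
              "Domain": "corp.example.com",
              "User": "datasync-user",
              "Password": "{{resolve:secretsmanager:DataSyncSmbPassword}}",
              "MountOptions": { "Version": "SMB3" }
            }
          }
        }
      }
    }
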
}, "AWS::DataSync::LocationFSxWindows": { - "Domain": "Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.", + "Domain": "Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.", "FsxFilesystemArn": "Specifies the Amazon Resource Name (ARN) for the FSx for Windows File Server file system.", "Password": "Specifies the password of the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system.", "SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Windows File Server file system.\n\n*Pattern* : `^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):ec2:[a-z\\-0-9]*:[0-9]{12}:security-group/.*$`\n\n*Length constraints* : Maximum length of 128.", @@ -11615,7 +11791,8 @@ }, "AWS::DataZone::DataSource DataSourceConfigurationInput": { "GlueRunConfiguration": "The configuration of the AWS Glue data source.", - "RedshiftRunConfiguration": "The configuration of the Amazon Redshift data source." + "RedshiftRunConfiguration": "The configuration of the Amazon Redshift data source.", + "SageMakerRunConfiguration": "" }, "AWS::DataZone::DataSource FilterExpression": { "Expression": "The search filter expression.", @@ -11659,6 +11836,9 @@ "FilterExpressions": "The filter expressions specified in the relational filter configuration for the data source.", "SchemaName": "The schema name specified in the relational filter configuration for the data source." }, + "AWS::DataZone::DataSource SageMakerRunConfigurationInput": { + "TrackingAssets": "" + }, "AWS::DataZone::DataSource ScheduleConfiguration": { "Schedule": "The schedule of the data source runs.", "Timezone": "The timezone of the data source run." @@ -12147,6 +12327,7 @@ "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region , occurring on a random day of the week.\n\nValid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun\n\nConstraints: Minimum 30-minute window.", "RestoreToTime": "The date and time to restore the cluster to.\n\nValid values: A time in Universal Coordinated Time (UTC) format.\n\nConstraints:\n\n- Must be before the latest restorable time for the instance.\n- Must be specified if the `UseLatestRestorableTime` parameter is not provided.\n- Cannot be specified if the `UseLatestRestorableTime` parameter is `true` .\n- Cannot be specified if the `RestoreType` parameter is `copy-on-write` .\n\nExample: `2015-03-07T23:45:00Z`", "RestoreType": "The type of restore to be performed. 
You can specify one of the following values:\n\n- `full-copy` - The new DB cluster is restored as a full copy of the source DB cluster.\n- `copy-on-write` - The new DB cluster is restored as a clone of the source DB cluster.\n\nConstraints: You can't specify `copy-on-write` if the engine version of the source DB cluster is earlier than 1.11.\n\nIf you don't specify a `RestoreType` value, then the new DB cluster is restored as a full copy of the source DB cluster.", + "ServerlessV2ScalingConfiguration": "", "SnapshotIdentifier": "The identifier for the snapshot or cluster snapshot to restore from.\n\nYou can use either the name or the Amazon Resource Name (ARN) to specify a cluster snapshot. However, you can use only the ARN to specify a snapshot.\n\nConstraints:\n\n- Must match the identifier of an existing snapshot.", "SourceDBClusterIdentifier": "The identifier of the source cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing `DBCluster` .", "StorageEncrypted": "Specifies whether the cluster is encrypted.\n\nIf you specify `SourceDBClusterIdentifier` or `SnapshotIdentifier` and don\u2019t specify `StorageEncrypted` , the encryption property is inherited from the source cluster or snapshot (unless `KMSKeyId` is specified, in which case the restored cluster will be encrypted with that KMS key). If the source is encrypted and `StorageEncrypted` is specified to be true, the restored cluster will be encrypted (if you want to use a different KMS key, specify the `KMSKeyId` property as well). If the source is unencrypted and `StorageEncrypted` is specified to be true, then the `KMSKeyId` property must be specified. If the source is encrypted, don\u2019t specify `StorageEncrypted` to be false as opting out of encryption is not allowed.", @@ -12155,6 +12336,10 @@ "UseLatestRestorableTime": "A value that is set to `true` to restore the cluster to the latest restorable backup time, and `false` otherwise.\n\nDefault: `false`\n\nConstraints: Cannot be specified if the `RestoreToTime` parameter is provided.", "VpcSecurityGroupIds": "A list of EC2 VPC security groups to associate with this cluster." }, + "AWS::DocDB::DBCluster ServerlessV2ScalingConfiguration": { + "MaxCapacity": "", + "MinCapacity": "" + }, "AWS::DocDB::DBCluster Tag": { "Key": "The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with \" `aws:` \" or \" `rds:` \". The string can contain only the set of Unicode letters, digits, white space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", "Value": "The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with \" `aws:` \" or \" `rds:` \". The string can contain only the set of Unicode letters, digits, white space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\")." @@ -12640,6 +12825,9 @@ "Max": "The maximum baseline bandwidth, in Mbps. To specify no maximum limit, omit this parameter.", "Min": "The minimum baseline bandwidth, in Mbps. To specify no minimum limit, omit this parameter." }, + "AWS::EC2::EC2Fleet BaselinePerformanceFactorsRequest": { + "Cpu": "The CPU performance to consider, using an instance family as the baseline reference." + }, "AWS::EC2::EC2Fleet CapacityRebalance": { "ReplacementStrategy": "The replacement strategy to use. 
Only available for fleets of type `maintain` .\n\n`launch` - EC2 Fleet launches a replacement Spot Instance when a rebalance notification is emitted for an existing Spot Instance in the fleet. EC2 Fleet does not terminate the instances that receive a rebalance notification. You can terminate the old instances, or you can leave them running. You are charged for all instances while they are running.\n\n`launch-before-terminate` - EC2 Fleet launches a replacement Spot Instance when a rebalance notification is emitted for an existing Spot Instance in the fleet, and then, after a delay that you specify (in `TerminationDelay` ), terminates the instances that received a rebalance notification.",
    "TerminationDelay": "The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot Instance after launching a new replacement Spot Instance.\n\nRequired when `ReplacementStrategy` is set to `launch-before-terminate` .\n\nNot valid when `ReplacementStrategy` is set to `launch` .\n\nValid values: Minimum value of `120` seconds. Maximum value of `7200` seconds."
@@ -12647,6 +12835,9 @@
  "AWS::EC2::EC2Fleet CapacityReservationOptionsRequest": {
    "UsageStrategy": "Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.\n\nIf you specify `use-capacity-reservations-first` , the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy ( `lowest-price` or `prioritized` ) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy ( `lowest-price` or `prioritized` ).\n\nIf you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy."
  },
+ "AWS::EC2::EC2Fleet CpuPerformanceFactorRequest": {
+  "References": "Specify an instance family to use as the baseline reference for CPU performance. All instance types that match your specified attributes will be compared against the CPU performance of the referenced instance family, regardless of CPU manufacturer or architecture differences.\n\n> Currently, only one instance family can be specified in the list."
+ },
  "AWS::EC2::EC2Fleet FleetLaunchTemplateConfigRequest": {
    "LaunchTemplateSpecification": "The launch template to use. You must specify either the launch template ID or launch template name in the request.",
    "Overrides": "Any parameters that you specify override the same parameters in the launch template.\n\nFor fleets of type `request` and `maintain` , a maximum of 300 items is allowed across all launch templates."
@@ -12675,6 +12866,7 @@
    "AllowedInstanceTypes": "The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to allow an instance type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` , Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.\n\n> If you specify `AllowedInstanceTypes` , you can't specify `ExcludedInstanceTypes` .
\n\nDefault: All instance types",
    "BareMetal": "Indicates whether bare metal instance types must be included, excluded, or required.\n\n- To include bare metal instance types, specify `included` .\n- To require only bare metal instance types, specify `required` .\n- To exclude bare metal instance types, specify `excluded` .\n\nDefault: `excluded`",
    "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits",
+   "BaselinePerformanceFactors": "The baseline performance to consider, using an instance family as a baseline reference. The instance family establishes the lowest acceptable level of performance. Amazon EC2 uses this baseline to guide instance type selection, but there is no guarantee that the selected instance types will always exceed the baseline for every application. Currently, this parameter only supports CPU performance as a baseline performance factor. For more information, see [Performance protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html#ec2fleet-abis-performance-protection) in the *Amazon EC2 User Guide* .",
    "BurstablePerformance": "Indicates whether burstable performance T instance types are included, excluded, or required. For more information, see [Burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) .\n\n- To include burstable performance instance types, specify `included` .\n- To require only burstable performance instance types, specify `required` .\n- To exclude burstable performance instance types, specify `excluded` .\n\nDefault: `excluded`",
    "CpuManufacturers": "The CPU manufacturers to include.\n\n- For instance types with Intel CPUs, specify `intel` .\n- For instance types with AMD CPUs, specify `amd` .\n- For instance types with AWS CPUs, specify `amazon-web-services` .\n- For instance types with Apple CPUs, specify `apple` .\n\n> Don't confuse the CPU manufacturer with the CPU architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template. \n\nDefault: Any manufacturer",
    "ExcludedInstanceTypes": "The instance types to exclude.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to exclude an instance family, type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` , Amazon EC2 will exclude the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will exclude all the M5a instance types, but not the M5n instance types.\n\n> If you specify `ExcludedInstanceTypes` , you can't specify `AllowedInstanceTypes` . \n\nDefault: No excluded instance types",
@@ -12719,6 +12911,9 @@
    "SingleAvailabilityZone": "Indicates that the fleet launches all On-Demand Instances into a single Availability Zone.\n\nSupported only for fleets of type `instant` .",
    "SingleInstanceType": "Indicates that the fleet uses a single instance type to launch all On-Demand Instances in the fleet.\n\nSupported only for fleets of type `instant` ."
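The `CapacityRebalance` , `BaselinePerformanceFactors` , and `CpuPerformanceFactorRequest` entries above all nest inside one `AWS::EC2::EC2Fleet` resource. A minimal sketch under the assumption of a `maintain`-type fleet; the launch template ID and delay value are hypothetical, and the referenced launch template is assumed not to set an `InstanceType` (which can't be combined with `InstanceRequirements` ):

```yaml
Fleet:
  Type: AWS::EC2::EC2Fleet
  Properties:
    Type: maintain
    TargetCapacitySpecification:
      TotalTargetCapacity: 4
      DefaultTargetCapacityType: spot
    SpotOptions:
      AllocationStrategy: price-capacity-optimized
      MaintenanceStrategies:
        CapacityRebalance:
          ReplacementStrategy: launch-before-terminate
          TerminationDelay: 300   # seconds (120-7200); required for launch-before-terminate
    LaunchTemplateConfigs:
      - LaunchTemplateSpecification:
          LaunchTemplateId: lt-0123456789abcdef0   # hypothetical
          Version: '1'
        Overrides:
          - InstanceRequirements:
              VCpuCount: { Min: 4 }
              MemoryMiB: { Min: 8192 }
              BaselinePerformanceFactors:
                Cpu:
                  References:
                    - InstanceFamily: c6i   # one family only; note `c6i`, not `c6`
```

With `launch-before-terminate` , keep in mind that both the old and replacement Spot Instances are billed until the termination delay expires.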
},
+ "AWS::EC2::EC2Fleet PerformanceFactorReferenceRequest": {
+  "InstanceFamily": "The instance family to use as a baseline reference.\n\n> Ensure that you specify the correct value for the instance family. The instance family is everything before the period ( `.` ) in the instance type name. For example, in the instance type `c6i.large` , the instance family is `c6i` , not `c6` . For more information, see [Amazon EC2 instance type naming conventions](https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-type-names.html) in *Amazon EC2 Instance Types* . \n\nThe following instance families are *not supported* for performance protection:\n\n- `c1`\n- `g3` | `g3s`\n- `hpc7g`\n- `m1` | `m2`\n- `mac1` | `mac2` | `mac2-m1ultra` | `mac2-m2` | `mac2-m2pro`\n- `p3dn` | `p4d` | `p5`\n- `t1`\n- `u-12tb1` | `u-18tb1` | `u-24tb1` | `u-3tb1` | `u-6tb1` | `u-9tb1` | `u7i-12tb` | `u7in-16tb` | `u7in-24tb` | `u7in-32tb`\n\nIf you enable performance protection by specifying a supported instance family, the returned instance types will exclude the above unsupported instance families.\n\nIf you specify an unsupported instance family as a value for baseline performance, the API returns an empty response for [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) and an exception for [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html) , [RequestSpotFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html) , [ModifyFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyFleet.html) , and [ModifySpotFleetRequest](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySpotFleetRequest.html) ."
+ },
  "AWS::EC2::EC2Fleet Placement": {
    "Affinity": "The affinity setting for the instance on the Dedicated Host.\n\nThis parameter is not supported for [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet) or [ImportInstance](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) .",
    "AvailabilityZone": "The Availability Zone of the instance.\n\nIf not specified, an Availability Zone will be automatically chosen for you based on the load balancing criteria for the Region.\n\nThis parameter is not supported for [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet) .",
@@ -12942,7 +13137,7 @@
    "PlacementGroupName": "The name of an existing placement group that you want to launch the instance into (cluster | partition | spread).",
    "PrivateDnsNameOptions": "The options for the instance hostname.",
    "PrivateIpAddress": "The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet.\n\nOnly one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request.\n\nYou cannot specify this option and the network interfaces option in the same request.\n\nIf you make an update to an instance that requires replacement, you must assign a new private IP address. During a replacement, AWS CloudFormation creates a new instance but doesn't delete the old instance until the stack has successfully updated.
If the stack update fails, AWS CloudFormation uses the old instance to roll back the stack to the previous working state. The old and new instances cannot have the same private IP address.", - "PropagateTagsToVolumeOnCreation": "Indicates whether to assign the tags from the instance to all of the volumes attached to the instance at launch. If you specify `true` and you assign tags to the instance, those tags are automatically assigned to all of the volumes that you attach to the instance at launch. If you specify `false` , those tags are not assigned to the attached volumes.", + "PropagateTagsToVolumeOnCreation": "Indicates whether to assign the tags specified in the `Tags` property to the volumes specified in the `BlockDeviceMappings` property.\n\nNote that using this feature does not assign the tags to volumes that are created separately and then attached using `AWS::EC2::VolumeAttachment` .", "RamdiskId": "The ID of the RAM disk to select. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, go to the AWS Resource Center and search for the kernel ID.\n\n> We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [PV-GRUB](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the *Amazon EC2 User Guide* .", "SecurityGroupIds": "The IDs of the security groups. You can specify the IDs of existing security groups and references to resources created by the stack template.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface.", "SecurityGroups": "[Default VPC] The names of the security groups. For a nondefault VPC, you must use security group IDs instead.\n\nYou cannot specify this option and the network interfaces option in the same request. The list can contain both the name of existing Amazon EC2 security groups or references to AWS::EC2::SecurityGroup resources created in the template.\n\nDefault: Amazon EC2 uses the default security group.", @@ -13114,7 +13309,7 @@ "UdpTimeout": "Timeout (in seconds) for idle UDP flows that have seen traffic only in a single direction or a single request-response transaction. Min: 30 seconds. Max: 60 seconds. Default: 30 seconds." }, "AWS::EC2::LaunchTemplate Cpu": { - "References": "" + "References": "The instance family to use as the baseline reference for CPU performance. All instance types that match your specified attributes are compared against the CPU performance of the referenced instance family, regardless of CPU manufacturer or architecture differences." }, "AWS::EC2::LaunchTemplate CpuOptions": { "AmdSevSnp": "Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is supported with M6a, R6a, and C6a instance types only. For more information, see [AMD SEV-SNP](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html) .", @@ -13312,7 +13507,7 @@ "PrivateIpAddress": "The private IPv4 address." }, "AWS::EC2::LaunchTemplate Reference": { - "InstanceFamily": "" + "InstanceFamily": "The instance family to use as a baseline reference.\n\n> Ensure that you specify the correct value for the instance family. The instance family is everything before the period ( `.` ) in the instance type name. For example, in the instance type `c6i.large` , the instance family is `c6i` , not `c6` . 
For more information, see [Amazon EC2 instance type naming conventions](https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-type-names.html) in *Amazon EC2 Instance Types* . \n\nThe following instance families are *not supported* for performance protection:\n\n- `c1`\n- `g3` | `g3s`\n- `hpc7g`\n- `m1` | `m2`\n- `mac1` | `mac2` | `mac2-m1ultra` | `mac2-m2` | `mac2-m2pro`\n- `p3dn` | `p4d` | `p5`\n- `t1`\n- `u-12tb1` | `u-18tb1` | `u-24tb1` | `u-3tb1` | `u-6tb1` | `u-9tb1` | `u7i-12tb` | `u7in-16tb` | `u7in-24tb` | `u7in-32tb`\n\nIf you enable performance protection by specifying a supported instance family, the returned instance types will exclude the above unsupported instance families."
  },
  "AWS::EC2::LaunchTemplate SpotOptions": {
    "BlockDurationMinutes": "Deprecated.",
@@ -13801,7 +13996,7 @@
    "Description": "Updates the description of an ingress (inbound) security group rule. You can replace an existing description, or add a description to a rule that did not have one previously.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*",
    "FromPort": "The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 type number. A value of `-1` indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify all codes.\n\nUse this for ICMP and any protocol that uses ports.",
    "GroupId": "The ID of the security group.",
-   "GroupName": "The name of the security group.\n\nConstraints: Up to 255 characters in length. Cannot start with `sg-` .\n\nValid characters: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*",
+   "GroupName": "[Default VPC] The name of the security group. For security groups in a default VPC, you can specify either the ID or the name of the security group. For security groups in a nondefault VPC, you must specify the ID of the security group.",
    "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.",
    "SourcePrefixListId": "The ID of a prefix list.",
    "SourceSecurityGroupId": "The ID of the security group. You must specify either the security group ID or the security group name. For security groups in a nondefault VPC, you must specify the security group ID.",
@@ -13831,6 +14026,9 @@
    "Max": "The maximum baseline bandwidth, in Mbps. To specify no maximum limit, omit this parameter.",
    "Min": "The minimum baseline bandwidth, in Mbps. To specify no minimum limit, omit this parameter."
  },
+ "AWS::EC2::SpotFleet BaselinePerformanceFactorsRequest": {
+  "Cpu": "The CPU performance to consider, using an instance family as the baseline reference."
+ },
  "AWS::EC2::SpotFleet BlockDeviceMapping": {
    "DeviceName": "The device name (for example, `/dev/sdh` or `xvdh` ).",
    "Ebs": "Parameters used to automatically set up EBS volumes when the instance is launched.",
@@ -13843,6 +14041,9 @@
  "AWS::EC2::SpotFleet ClassicLoadBalancersConfig": {
    "ClassicLoadBalancers": "One or more Classic Load Balancers."
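Per the updated `GroupId` / `GroupName` guidance above, rules for security groups in a nondefault VPC should reference the group by ID. A minimal sketch; the group ID and CIDR range are hypothetical:

```yaml
AllowHttpsInbound:
  Type: AWS::EC2::SecurityGroupIngress
  Properties:
    GroupId: sg-0123456789abcdef0   # use GroupName only for a default VPC
    IpProtocol: tcp                 # tcp, udp, and icmp require a port range
    FromPort: 443
    ToPort: 443
    CidrIp: 203.0.113.0/24
    Description: HTTPS from the example office range
```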
}, + "AWS::EC2::SpotFleet CpuPerformanceFactorRequest": { + "References": "Specify an instance family to use as the baseline reference for CPU performance. All instance types that match your specified attributes will be compared against the CPU performance of the referenced instance family, regardless of CPU manufacturer or architecture differences.\n\n> Currently, only one instance family can be specified in the list." + }, "AWS::EC2::SpotFleet EbsBlockDevice": { "DeleteOnTermination": "Indicates whether the EBS volume is deleted on instance termination. For more information, see [Preserving Amazon EBS volumes on instance termination](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#preserving-volumes-on-termination) in the *Amazon EC2 User Guide* .", "Encrypted": "Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Amazon EBS Encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-parameters) in the *Amazon EC2 User Guide* .\n\nIn no case can you remove encryption from an encrypted volume.\n\nEncrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see [Supported Instance Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) .\n\nThis parameter is not returned by [DescribeImageAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImageAttribute.html) .", @@ -13875,7 +14076,7 @@ "Ipv6Addresses": "The IPv6 addresses to assign to the network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch.", "NetworkInterfaceId": "The ID of the network interface.\n\nIf you are creating a Spot Fleet, omit this parameter because you can\u2019t specify a network interface ID in a launch specification.", "PrivateIpAddresses": "The private IPv4 addresses to assign to the network interface. Only one private IPv4 address can be designated as primary. You cannot specify this option if you're launching more than one instance in a [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) request.", - "SecondaryPrivateIpAddressCount": "The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option. You cannot specify this option if you're launching more than one instance in a [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) request.", + "SecondaryPrivateIpAddressCount": "The number of secondary private IPv4 addresses. You can\u2019t specify this parameter and also specify a secondary private IP address using the `PrivateIpAddress` parameter.", "SubnetId": "The ID of the subnet associated with the network interface." }, "AWS::EC2::SpotFleet InstanceRequirementsRequest": { @@ -13887,6 +14088,7 @@ "AllowedInstanceTypes": "The instance types to apply your specified attributes against. 
All other instance types are ignored, even if they match your specified attributes.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to allow an instance type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` , Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.\n\n> If you specify `AllowedInstanceTypes` , you can't specify `ExcludedInstanceTypes` . \n\nDefault: All instance types",
    "BareMetal": "Indicates whether bare metal instance types must be included, excluded, or required.\n\n- To include bare metal instance types, specify `included` .\n- To require only bare metal instance types, specify `required` .\n- To exclude bare metal instance types, specify `excluded` .\n\nDefault: `excluded`",
    "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits",
+   "BaselinePerformanceFactors": "The baseline performance to consider, using an instance family as a baseline reference. The instance family establishes the lowest acceptable level of performance. Amazon EC2 uses this baseline to guide instance type selection, but there is no guarantee that the selected instance types will always exceed the baseline for every application. Currently, this parameter only supports CPU performance as a baseline performance factor. For more information, see [Performance protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html#ec2fleet-abis-performance-protection) in the *Amazon EC2 User Guide* .",
    "BurstablePerformance": "Indicates whether burstable performance T instance types are included, excluded, or required. For more information, see [Burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) .\n\n- To include burstable performance instance types, specify `included` .\n- To require only burstable performance instance types, specify `required` .\n- To exclude burstable performance instance types, specify `excluded` .\n\nDefault: `excluded`",
    "CpuManufacturers": "The CPU manufacturers to include.\n\n- For instance types with Intel CPUs, specify `intel` .\n- For instance types with AMD CPUs, specify `amd` .\n- For instance types with AWS CPUs, specify `amazon-web-services` .\n- For instance types with Apple CPUs, specify `apple` .\n\n> Don't confuse the CPU manufacturer with the CPU architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template. \n\nDefault: Any manufacturer",
    "ExcludedInstanceTypes": "The instance types to exclude.\n\nYou can use strings with one or more wild cards, represented by an asterisk ( `*` ), to exclude an instance family, type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` , Amazon EC2 will exclude the entire C5 instance family, which includes all C5a and C5n instance types.
If you specify `m5a.*` , Amazon EC2 will exclude all the M5a instance types, but not the M5n instance types.\n\n> If you specify `ExcludedInstanceTypes` , you can't specify `AllowedInstanceTypes` . \n\nDefault: No excluded instance types", @@ -13937,6 +14139,9 @@ "Max": "The maximum number of network interfaces. To specify no maximum limit, omit this parameter.", "Min": "The minimum number of network interfaces. To specify no minimum limit, omit this parameter." }, + "AWS::EC2::SpotFleet PerformanceFactorReferenceRequest": { + "InstanceFamily": "The instance family to use as a baseline reference.\n\n> Ensure that you specify the correct value for the instance family. The instance family is everything before the period ( `.` ) in the instance type name. For example, in the instance type `c6i.large` , the instance family is `c6i` , not `c6` . For more information, see [Amazon EC2 instance type naming conventions](https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-type-names.html) in *Amazon EC2 Instance Types* . \n\nThe following instance families are *not supported* for performance protection:\n\n- `c1`\n- `g3` | `g3s`\n- `hpc7g`\n- `m1` | `m2`\n- `mac1` | `mac2` | `mac2-m1ultra` | `mac2-m2` | `mac2-m2pro`\n- `p3dn` | `p4d` | `p5`\n- `t1`\n- `u-12tb1` | `u-18tb1` | `u-24tb1` | `u-3tb1` | `u-6tb1` | `u-9tb1` | `u7i-12tb` | `u7in-16tb` | `u7in-24tb` | `u7in-32tb`\n\nIf you enable performance protection by specifying a supported instance family, the returned instance types will exclude the above unsupported instance families.\n\nIf you specify an unsupported instance family as a value for baseline performance, the API returns an empty response for [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) and an exception for [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html) , [RequestSpotFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html) , [ModifyFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyFleet.html) , and [ModifySpotFleetRequest](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySpotFleetRequest.html) ." + }, "AWS::EC2::SpotFleet PrivateIpAddressSpecification": { "Primary": "Indicates whether the private IPv4 address is the primary private IPv4 address. Only one IPv4 address can be designated as primary.", "PrivateIpAddress": "The private IPv4 address." @@ -14285,6 +14490,7 @@ "Ipv4IpamPoolId": "Associate a CIDR allocated from an IPv4 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see [What is IPAM?](https://docs.aws.amazon.com//vpc/latest/ipam/what-is-it-ipam.html) in the *Amazon VPC IPAM User Guide* .", "Ipv4NetmaskLength": "The netmask length of the IPv4 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see [What is IPAM?](https://docs.aws.amazon.com//vpc/latest/ipam/what-is-it-ipam.html) in the *Amazon VPC IPAM User Guide* .", "Ipv6CidrBlock": "An IPv6 CIDR block from the IPv6 address pool. You must also specify `Ipv6Pool` in the request.\n\nTo let Amazon choose the IPv6 CIDR block for you, omit this parameter.", + "Ipv6CidrBlockNetworkBorderGroup": "", "Ipv6IpamPoolId": "Associates a CIDR allocated from an IPv6 IPAM pool to a VPC. 
For more information about Amazon VPC IP Address Manager (IPAM), see [What is IPAM?](https://docs.aws.amazon.com//vpc/latest/ipam/what-is-it-ipam.html) in the *Amazon VPC IPAM User Guide* .", "Ipv6NetmaskLength": "The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see [What is IPAM?](https://docs.aws.amazon.com//vpc/latest/ipam/what-is-it-ipam.html) in the *Amazon VPC IPAM User Guide* .", "Ipv6Pool": "The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block.", @@ -14295,15 +14501,28 @@ "VpcId": "The ID of the VPC." }, "AWS::EC2::VPCEndpoint": { + "DnsOptions": "Describes the DNS options for an endpoint.", + "IpAddressType": "The supported IP address types.", "PolicyDocument": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n\nFor CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. For example, if you have a JSON policy, you can convert it to YAML before including it in the YAML template, and AWS CloudFormation converts the policy to JSON format before calling the API actions for AWS PrivateLink . Alternatively, you can include the JSON directly in the YAML, as shown in the following `Properties` section:\n\n`Properties: VpcEndpointType: 'Interface' ServiceName: !Sub 'com.amazonaws.${AWS::Region}.logs' PolicyDocument: '{ \"Version\":\"2012-10-17\", \"Statement\": [{ \"Effect\":\"Allow\", \"Principal\":\"*\", \"Action\":[\"logs:Describe*\",\"logs:Get*\",\"logs:List*\",\"logs:FilterLogEvents\"], \"Resource\":\"*\" }] }'`", "PrivateDnsEnabled": "Indicate whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, `kinesis.us-east-1.amazonaws.com` ), which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.\n\nTo use a private hosted zone, you must set the following VPC attributes to `true` : `enableDnsHostnames` and `enableDnsSupport` .\n\nThis property is supported only for interface endpoints.\n\nDefault: `false`", + "ResourceConfigurationArn": "The Amazon Resource Name (ARN) of the resource configuration.", "RouteTableIds": "The IDs of the route tables. Routing is supported only for gateway endpoints.", "SecurityGroupIds": "The IDs of the security groups to associate with the endpoint network interfaces. If this parameter is not specified, we use the default security group for the VPC. Security groups are supported only for interface endpoints.", "ServiceName": "The name of the endpoint service.", + "ServiceNetworkArn": "The Amazon Resource Name (ARN) of the service network.", "SubnetIds": "The IDs of the subnets in which to create endpoint network interfaces. You must specify this property for an interface endpoint or a Gateway Load Balancer endpoint. You can't specify this property for a gateway endpoint. For a Gateway Load Balancer endpoint, you can specify only one subnet.", + "Tags": "", "VpcEndpointType": "The type of endpoint.\n\nDefault: Gateway", "VpcId": "The ID of the VPC." 
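The new `DnsOptions` , `IpAddressType` , and `Tags` properties above apply to interface endpoints. A minimal sketch with hypothetical IDs, reusing the CloudWatch Logs service name from the `PolicyDocument` example:

```yaml
LogsInterfaceEndpoint:
  Type: AWS::EC2::VPCEndpoint
  Properties:
    VpcId: vpc-0123456789abcdef0        # hypothetical
    VpcEndpointType: Interface
    ServiceName: !Sub 'com.amazonaws.${AWS::Region}.logs'
    SubnetIds:
      - subnet-0123456789abcdef0        # hypothetical
    SecurityGroupIds:
      - sg-0123456789abcdef0            # hypothetical
    PrivateDnsEnabled: true             # requires enableDnsHostnames and enableDnsSupport on the VPC
    IpAddressType: ipv4
    DnsOptions:
      DnsRecordIpType: ipv4
    Tags:
      - Key: Name
        Value: logs-endpoint
```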
}, + "AWS::EC2::VPCEndpoint DnsOptionsSpecification": { + "DnsRecordIpType": "The DNS records created for the endpoint.", + "PrivateDnsOnlyForInboundResolverEndpoint": "Indicates whether to enable private DNS only for inbound endpoints. This option is available only for services that support both gateway and interface endpoints. It routes traffic that originates from the VPC to the gateway endpoint and traffic that originates from on-premises to the interface endpoint." + }, + "AWS::EC2::VPCEndpoint Tag": { + "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with `aws:` .", + "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." + }, "AWS::EC2::VPCEndpointConnectionNotification": { "ConnectionEvents": "The endpoint events for which to receive notifications. Valid values are `Accept` , `Connect` , `Delete` , and `Reject` .", "ConnectionNotificationArn": "The ARN of the SNS topic for the notifications.", @@ -14315,7 +14534,12 @@ "ContributorInsightsEnabled": "Indicates whether to enable the built-in Contributor Insights rules provided by AWS PrivateLink .", "GatewayLoadBalancerArns": "The Amazon Resource Names (ARNs) of the Gateway Load Balancers.", "NetworkLoadBalancerArns": "The Amazon Resource Names (ARNs) of the Network Load Balancers.", - "PayerResponsibility": "The entity that is responsible for the endpoint costs. The default is the endpoint owner. If you set the payer responsibility to the service owner, you cannot set it back to the endpoint owner." + "PayerResponsibility": "The entity that is responsible for the endpoint costs. The default is the endpoint owner. If you set the payer responsibility to the service owner, you cannot set it back to the endpoint owner.", + "Tags": "" + }, + "AWS::EC2::VPCEndpointService Tag": { + "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with `aws:` .", + "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." }, "AWS::EC2::VPCEndpointServicePermissions": { "AllowedPrincipals": "The Amazon Resource Names (ARN) of one or more principals (for example, users, IAM roles, and AWS accounts ). Permissions are granted to the principals in this list. To grant permissions to all principals, specify an asterisk (*). Permissions are revoked for principals not in this list. If the list is empty, then all permissions are revoked.", @@ -14686,7 +14910,7 @@ "AWS::ECS::Cluster": { "CapacityProviders": "The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the [CreateService](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) or [RunTask](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) actions.\n\nIf specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be created but not associated with another cluster. 
New Auto Scaling group capacity providers can be created with the [CreateCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html) API operation.\n\nTo use an AWS Fargate capacity provider, specify either the `FARGATE` or `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.\n\nThe [PutCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutCapacityProvider.html) API operation is used to update the list of available capacity providers for a cluster after the cluster is created.",
    "ClusterName": "A user-generated string that you use to identify your cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID for the name.",
-   "ClusterSettings": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.",
+   "ClusterSettings": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights with enhanced observability or CloudWatch Container Insights for a cluster.\n\nContainer Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability set-up.\n\nFor more information, see [Monitor Amazon ECS containers using Container Insights with enhanced observability](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html) in the *Amazon Elastic Container Service Developer Guide* .",
    "Configuration": "The execute command and managed storage configuration for the cluster.",
    "DefaultCapacityProviderStrategy": "The default capacity provider strategy for the cluster. When services or tasks are run in the cluster with no launch type or capacity provider strategy specified, the default capacity provider strategy is used.",
    "ServiceConnectDefaults": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the `enabled` parameter to `true` in the `ServiceConnectConfiguration` . You can set the namespace of each service individually in the `ServiceConnectConfiguration` to override this default parameter.\n\nTasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect.
For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", @@ -14703,7 +14927,7 @@ }, "AWS::ECS::Cluster ClusterSettings": { "Name": "The name of the cluster setting. The value is `containerInsights` .", - "Value": "The value to set for the cluster setting. The supported values are `enabled` and `disabled` .\n\nIf you set `name` to `containerInsights` and `value` to `enabled` , CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the `containerInsights` account setting is turned on. If a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) ." + "Value": "The value to set for the cluster setting. The supported values are `enhanced` , `enabled` , and `disabled` .\n\nTo use Container Insights with enhanced observability, set the `containerInsights` account setting to `enhanced` .\n\nTo use Container Insights, set the `containerInsights` account setting to `enabled` .\n\nIf a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) ." }, "AWS::ECS::Cluster ExecuteCommandConfiguration": { "KmsKeyId": "Specify an AWS Key Management Service key ID to encrypt the data between the local client and the container.", @@ -14745,7 +14969,7 @@ }, "AWS::ECS::Service": { "AvailabilityZoneRebalancing": "Indicates whether to use Availability Zone rebalancing for the service.\n\nFor more information, see [Balancing an Amazon ECS service across Availability Zones](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-rebalancing.html) in the *Amazon Elastic Container Service Developer Guide* .", - "CapacityProviderStrategy": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted. If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy may contain a maximum of 6 capacity providers.", + "CapacityProviderStrategy": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted. If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy can contain a maximum of 20 capacity providers.", "Cluster": "The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed.", "DeploymentConfiguration": "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.", "DeploymentController": "The deployment controller to use for the service. 
If no deployment controller is specified, the default value of `ECS` is used.", @@ -14771,7 +14995,7 @@ "VpcLatticeConfigurations": "The VPC Lattice configuration for the service being created." }, "AWS::ECS::Service AwsVpcConfiguration": { - "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", + "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `ENABLED` .", "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC." }, @@ -14792,7 +15016,7 @@ "AWS::ECS::Service DeploymentConfiguration": { "Alarms": "Information about the CloudWatch alarms.", "DeploymentCircuitBreaker": "> The deployment circuit breaker can only be used for services using the rolling update ( `ECS` ) deployment type. \n\nThe *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*", - "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. 
The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the service uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.", "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nIf any tasks are unhealthy and if `maximumPercent` doesn't allow the Amazon ECS scheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one \u2014 using the `minimumHealthyPercent` as a constraint \u2014 to clear up capacity to launch replacement tasks. 
For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." 
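Tying the `ClusterSettings` and `DeploymentConfiguration` descriptions above together, a minimal sketch follows. The task definition and subnet are hypothetical resources assumed to exist elsewhere in the template:

```yaml
Cluster:
  Type: AWS::ECS::Cluster
  Properties:
    ClusterSettings:
      - Name: containerInsights
        Value: enhanced               # or enabled | disabled

Service:
  Type: AWS::ECS::Service
  Properties:
    Cluster: !Ref Cluster
    TaskDefinition: !Ref TaskDefinition   # hypothetical AWS::ECS::TaskDefinition
    DesiredCount: 4
    LaunchType: FARGATE
    NetworkConfiguration:
      AwsvpcConfiguration:
        AssignPublicIp: DISABLED          # set explicitly; see the AssignPublicIp default noted above
        Subnets:
          - subnet-0123456789abcdef0      # hypothetical
    DeploymentConfiguration:
      MaximumPercent: 200         # the scheduler may start four new tasks before stopping the old four
      MinimumHealthyPercent: 50   # two of four tasks may stop during the rollout
      DeploymentCircuitBreaker:
        Enable: true
        Rollback: true
```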
}, "AWS::ECS::Service DeploymentController": { @@ -14811,7 +15035,7 @@ }, "AWS::ECS::Service LogConfiguration": { "LogDriver": "The log driver to use for the container.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n\nFor more information about using the `awslogs` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor more information about using the `awsfirelens` log driver, see [Send Amazon ECS logs to an AWS service or AWS Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) .\n\n> If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.", - "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. 
Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. 
When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
+ "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance.
Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. That way, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container health check failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss.
For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
"SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* ."
},
"AWS::ECS::Service NetworkConfiguration": {
@@ -14893,6 +15117,7 @@
"AWS::ECS::TaskDefinition": {
"ContainerDefinitions": "A list of container definitions in JSON format that describe the different containers that make up your task. For more information about container definition parameters and defaults, see [Amazon ECS Task Definitions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) in the *Amazon Elastic Container Service Developer Guide* .",
"Cpu": "The number of `cpu` units used by the task. If you use the EC2 launch type, this field is optional. If you use the Fargate launch type, this field is required. You must use one of the following values.
The value that you choose determines your range of valid values for the `memory` parameter.\n\nIf you use the EC2 launch type, this field is optional. Supported values are between `128` CPU units ( `0.125` vCPUs) and `10240` CPU units ( `10` vCPUs).\n\nThe CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate.\n\n- 256 (.25 vCPU) - Available `memory` values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)\n- 512 (.5 vCPU) - Available `memory` values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)\n- 1024 (1 vCPU) - Available `memory` values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)\n- 2048 (2 vCPU) - Available `memory` values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)\n- 4096 (4 vCPU) - Available `memory` values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)\n- 8192 (8 vCPU) - Available `memory` values: Between 16 GB and 60 GB in 4 GB increments\n\nThis option requires Linux platform `1.4.0` or later.\n- 16384 (16 vCPU) - Available `memory` values: Between 32 GB and 120 GB in 8 GB increments\n\nThis option requires Linux platform `1.4.0` or later.",
+ "EnableFaultInjection": "Enables fault injection and allows for fault injection requests to be accepted from the task's containers. The default value is `false` .",
"EphemeralStorage": "The ephemeral storage settings to use for tasks run with the task definition.",
"ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. For information about the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .",
"Family": "The name of a family that this task definition is registered to. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.\n\nA family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add.\n\n> To use revision numbers when you update a task definition, specify this property. If you don't specify a value, AWS CloudFormation generates a new task definition each time that you update it.",
@@ -14985,7 +15210,7 @@
"Value": "The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file."
},
"AWS::ECS::TaskDefinition EphemeralStorage": {
- "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `20` GiB and the maximum supported value is `200` GiB."
+ "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB."
},
"AWS::ECS::TaskDefinition FSxAuthorizationConfig": {
"CredentialsParameter": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter.
The ARN refers to the stored credentials.", @@ -15037,7 +15262,7 @@ }, "AWS::ECS::TaskDefinition LogConfiguration": { "LogDriver": "The log driver to use for the container.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n\nFor more information about using the `awslogs` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor more information about using the `awsfirelens` log driver, see [Send Amazon ECS logs to an AWS service or AWS Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) .\n\n> If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.", - "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. 
Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. 
When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
+ "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance.
Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. That way, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container health check failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss.
For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`",
"SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* ."
},
"AWS::ECS::TaskDefinition MountPoint": {
@@ -15127,7 +15352,7 @@
"TaskDefinition": "The task definition for the tasks in the task set to use. If a revision isn't specified, the latest `ACTIVE` revision is used."
},
"AWS::ECS::TaskSet AwsVpcConfiguration": {
- "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .",
+ "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `ENABLED` .",
"SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used.
There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.",
"Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC."
},
@@ -15206,7 +15431,7 @@
"Value": "The value of the tag key."
},
"AWS::EFS::FileSystem FileSystemProtection": {
- "ReplicationOverwriteProtection": "The status of the file system's replication overwrite protection.\n\n- `ENABLED` \u2013 The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is `ENABLED` by default.\n- `DISABLED` \u2013 The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.\n- `REPLICATING` \u2013 The file system is being used as the destination file system in a replication configuration. The file system is read-only and is only modified only by EFS replication.\n\nIf the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, the file system becomes writeable."
+ "ReplicationOverwriteProtection": "The status of the file system's replication overwrite protection.\n\n- `ENABLED` \u2013 The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is `ENABLED` by default.\n- `DISABLED` \u2013 The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.\n- `REPLICATING` \u2013 The file system is being used as the destination file system in a replication configuration. The file system is read-only and is modified only by EFS replication.\n\nIf the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, and the file system becomes writeable."
},
"AWS::EFS::FileSystem LifecyclePolicy": {
"TransitionToArchive": "The number of days after files were last accessed in primary storage (the Standard storage class) at which to move them to Archive storage. Metadata operations such as listing the contents of a directory don't count as file access events.",
@@ -15221,9 +15446,9 @@
"FileSystemId": "The ID of the destination Amazon EFS file system.",
"KmsKeyId": "The ID of an AWS KMS key used to protect the encrypted file system.",
"Region": "The AWS Region in which the destination file system is located.\n\n> For One Zone file systems, the replication configuration must specify the AWS Region in which the destination file system is located.",
- "RoleArn": "",
- "Status": "",
- "StatusMessage": ""
+ "RoleArn": "The Amazon Resource Name (ARN) of the current source file system in the replication configuration.",
+ "Status": "Describes the status of the replication configuration. For more information about replication status, see [Viewing replication details](https://docs.aws.amazon.com//efs/latest/ug/awsbackup.html#restoring-backup-efsmonitoring-replication-status.html) in the *Amazon EFS User Guide* .",
+ "StatusMessage": "Message that provides details about the `PAUSED` or `ERROR` state of the replication destination configuration.
For more information about replication status messages, see [Viewing replication details](https://docs.aws.amazon.com//efs/latest/ug/awsbackup.html#restoring-backup-efsmonitoring-replication-status.html) in the *Amazon EFS User Guide* ."
},
"AWS::EFS::MountTarget": {
"FileSystemId": "The ID of the file system for which to create the mount target.",
@@ -15257,7 +15482,7 @@
"AddonVersion": "The version of the add-on.",
"ClusterName": "The name of your cluster.",
"ConfigurationValues": "The configuration values that you provided.",
- "PodIdentityAssociations": "An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.\n\nFor more information, see [Attach an IAM Role to an Amazon EKS add-on using Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html) in the EKS User Guide.",
+ "PodIdentityAssociations": "An array of Pod Identity Associations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.\n\nFor more information, see [Attach an IAM Role to an Amazon EKS add-on using Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/add-ons-iam.html) in the *Amazon EKS User Guide* .",
"PreserveOnDelete": "Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM account is associated with the add-on, it isn't removed.",
"ResolveConflicts": "How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are handled based on the value you choose:\n\n- *None* \u2013 If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the value. Creation of the add-on might fail.\n- *Overwrite* \u2013 If the self-managed version of the add-on is installed on your cluster and the Amazon EKS default value is different than the existing value, Amazon EKS changes the value to the Amazon EKS default value.\n- *Preserve* \u2013 This is similar to the NONE option. If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the add-on resource properties. Creation of the add-on might fail if conflicts are detected. This option works differently during the update operation. For more information, see [UpdateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html) .\n\nIf you don't currently have the self-managed version of the add-on installed on your cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values to default values, regardless of the option that you specify.",
"ServiceAccountRoleArn": "The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the *Amazon EKS User Guide* .\n\n> To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster.
For more information, see [Enabling IAM roles for service accounts on your cluster](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) in the *Amazon EKS User Guide* .",
@@ -15274,18 +15499,18 @@
"AWS::EKS::Cluster": {
"AccessConfig": "The access configuration for the cluster.",
"BootstrapSelfManagedAddons": "If you set this value to `False` when creating a cluster, the default networking add-ons will not be installed.\n\nThe default networking add-ons include vpc-cni, coredns, and kube-proxy.\n\nUse this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.",
- "ComputeConfig": "",
+ "ComputeConfig": "Indicates the current configuration of the compute capability on your EKS Auto Mode cluster, for example, whether the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your AWS account. For more information, see EKS Auto Mode compute capability in the *Amazon EKS User Guide* .",
"EncryptionConfig": "The encryption configuration for the cluster.",
"KubernetesNetworkConfig": "The Kubernetes network configuration for the cluster.",
"Logging": "The logging configuration for your cluster.",
"Name": "The unique name to give to your cluster. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the AWS Region and AWS account that you're creating the cluster in. Note that underscores can't be used in AWS CloudFormation .",
"OutpostConfig": "An object representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This object isn't available for clusters on the AWS cloud.",
- "RemoteNetworkConfig": "",
+ "RemoteNetworkConfig": "The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this configuration after the cluster is created.",
"ResourcesVpcConfig": "The VPC configuration that's used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the *Amazon EKS User Guide* . You must specify at least two subnets. You can specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.",
"RoleArn": "The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. For more information, see [Amazon EKS Service IAM Role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) in the **Amazon EKS User Guide** .",
- "StorageConfig": "",
+ "StorageConfig": "Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster, for example, whether the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your AWS account. For more information, see EKS Auto Mode block storage capability in the *Amazon EKS User Guide* .",
"Tags": "The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.
Cluster tags don't propagate to any other resources associated with the cluster.\n\n> You must have the `eks:TagResource` and `eks:UntagResource` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", - "UpgradePolicy": "This value indicates if extended support is enabled or disabled for the cluster.\n\n[Learn more about EKS Extended Support in the EKS User Guide.](https://docs.aws.amazon.com/eks/latest/userguide/extended-support-control.html)", + "UpgradePolicy": "This value indicates if extended support is enabled or disabled for the cluster.\n\n[Learn more about EKS Extended Support in the *Amazon EKS User Guide* .](https://docs.aws.amazon.com/eks/latest/userguide/extended-support-control.html)", "Version": "The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used.\n\n> The default version might not be the latest version available.", "ZonalShiftConfig": "The configuration for zonal shift for the cluster." }, @@ -15294,29 +15519,29 @@ "BootstrapClusterCreatorAdminPermissions": "Specifies whether or not the cluster creator IAM principal was set as a cluster admin access entry during cluster creation time. The default value is `true` ." }, "AWS::EKS::Cluster BlockStorage": { - "Enabled": "" + "Enabled": "Indicates if the block storage capability is enabled on your EKS Auto Mode cluster. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your AWS account." }, "AWS::EKS::Cluster ClusterLogging": { "EnabledTypes": "The enabled control plane logs for your cluster. All log types are disabled if the array is empty.\n\n> When updating a resource, you must include this `EnabledTypes` property if the previous CloudFormation template of the resource had it." }, "AWS::EKS::Cluster ComputeConfig": { - "Enabled": "", - "NodePools": "", - "NodeRoleArn": "" + "Enabled": "Request to enable or disable the compute capability on your EKS Auto Mode cluster. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your AWS account.", + "NodePools": "Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the *Amazon EKS User Guide* .", + "NodeRoleArn": "The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the *Amazon EKS User Guide* ." }, "AWS::EKS::Cluster ControlPlanePlacement": { "GroupName": "The name of the placement group for the Kubernetes control plane instances. This property is only used for a local cluster on an AWS Outpost." }, "AWS::EKS::Cluster ElasticLoadBalancing": { - "Enabled": "" + "Enabled": "Indicates if the load balancing capability is enabled on your EKS Auto Mode cluster. If the load balancing capability is enabled, EKS Auto Mode will create and delete load balancers in your AWS account." }, "AWS::EKS::Cluster EncryptionConfig": { "Provider": "The encryption provider for the cluster.", "Resources": "Specifies the resources to be encrypted. The only supported value is `secrets` ." 
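Tying together the `EncryptionConfig` descriptions above, here is a minimal sketch of how envelope encryption of Kubernetes secrets is declared on an `AWS::EKS::Cluster` resource. All names, ARNs, and IDs below are illustrative placeholders (JSON templates can't carry comments, so the assumptions are stated here instead); per the descriptions above, the KMS key must be symmetric and in the cluster's Region, and `secrets` is the only supported `Resources` value:

```json
{
  "Resources": {
    "SampleEksCluster": {
      "Type": "AWS::EKS::Cluster",
      "Properties": {
        "Name": "sample-cluster",
        "RoleArn": "arn:aws:iam::111122223333:role/sample-eks-cluster-role",
        "ResourcesVpcConfig": {
          "SubnetIds": ["subnet-1111aaaa", "subnet-2222bbbb"]
        },
        "EncryptionConfig": [
          {
            "Provider": {
              "KeyArn": "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
            },
            "Resources": ["secrets"]
          }
        ]
      }
    }
  }
}
```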
}, "AWS::EKS::Cluster KubernetesNetworkConfig": { - "ElasticLoadBalancing": "", - "IpFamily": "Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both `IPv4` and `IPv6` CIDR blocks assigned to them. You can't specify `ipv6` for clusters in China Regions.\n\nYou can only specify `ipv6` for `1.21` and later clusters that use version `1.10.1` or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements listed in the considerations listed in [Assigning IPv6 addresses to pods and services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the Amazon EKS User Guide. Kubernetes assigns services `IPv6` addresses from the unique local address range `(fc00::/7)` . You can't specify a custom `IPv6` CIDR block. Pod addresses are assigned from the subnet's `IPv6` CIDR.", + "ElasticLoadBalancing": "Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the *Amazon EKS User Guide* .", + "IpFamily": "Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both `IPv4` and `IPv6` CIDR blocks assigned to them. You can't specify `ipv6` for clusters in China Regions.\n\nYou can only specify `ipv6` for `1.21` and later clusters that use version `1.10.1` or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements listed in the considerations listed in [Assigning IPv6 addresses to pods and services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the *Amazon EKS User Guide* . Kubernetes assigns services `IPv6` addresses from the unique local address range `(fc00::/7)` . You can't specify a custom `IPv6` CIDR block. Pod addresses are assigned from the subnet's `IPv6` CIDR.", "ServiceIpv4Cidr": "Don't specify a value if you select `ipv6` for *ipFamily* . The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the `10.100.0.0/16` or `172.20.0.0/16` CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. The block must meet the following requirements:\n\n- Within one of the following private IP address blocks: `10.0.0.0/8` , `172.16.0.0/12` , or `192.168.0.0/16` .\n- Doesn't overlap with any CIDR block assigned to the VPC that you selected for VPC.\n- Between `/24` and `/12` .\n\n> You can only specify a custom CIDR block when you create a cluster. You can't change this value after the cluster is created.", "ServiceIpv6Cidr": "The CIDR block that Kubernetes pod and service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified `ipv6` for *ipFamily* when you created the cluster. 
Kubernetes assigns service addresses from the unique local address range ( `fc00::/7` ) because you can't specify a custom IPv6 CIDR block when you create the cluster." }, @@ -15335,14 +15560,14 @@ "KeyArn": "Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be symmetric and created in the same AWS Region as the cluster. If the KMS key was created in a different account, the [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) must have access to the KMS key. For more information, see [Allowing users in other accounts to use a KMS key](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-modifying-external-accounts.html) in the *AWS Key Management Service Developer Guide* ." }, "AWS::EKS::Cluster RemoteNetworkConfig": { - "RemoteNodeNetworks": "", - "RemotePodNetworks": "" + "RemoteNodeNetworks": "The list of network CIDRs that can contain hybrid nodes.\n\nThese CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.\n\nEnter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, `10.2.0.0/16` ).\n\nIt must satisfy the following requirements:\n\n- Each block must be within an `IPv4` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.\n- Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.\n- Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect .\n- Each host must allow outbound connection to the EKS cluster control plane on TCP ports `443` and `10250` .\n- Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.\n- Each host must allow TCP and UDP network connectivity to and from other hosts that are running `CoreDNS` on UDP port `53` for service and pod DNS names.", + "RemotePodNetworks": "The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.\n\nThese CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.\n\nEnter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, `10.2.0.0/16` ).\n\nIt must satisfy the following requirements:\n\n- Each block must be within an `IPv4` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.\n- Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range." }, "AWS::EKS::Cluster RemoteNodeNetwork": { - "Cidrs": "" + "Cidrs": "A network CIDR that can contain hybrid nodes.\n\nThese CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.\n\nEnter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, `10.2.0.0/16` ).\n\nIt must satisfy the following requirements:\n\n- Each block must be within an `IPv4` RFC-1918 network range. 
Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.\n- Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.\n- Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including AWS Transit Gateway , AWS Site-to-Site VPN , or AWS Direct Connect .\n- Each host must allow outbound connection to the EKS cluster control plane on TCP ports `443` and `10250` .\n- Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.\n- Each host must allow TCP and UDP network connectivity to and from other hosts that are running `CoreDNS` on UDP port `53` for service and pod DNS names." }, "AWS::EKS::Cluster RemotePodNetwork": { - "Cidrs": "" + "Cidrs": "A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.\n\nThese CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.\n\nEnter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, `10.2.0.0/16` ).\n\nIt must satisfy the following requirements:\n\n- Each block must be within an `IPv4` RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.\n- Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range." }, "AWS::EKS::Cluster ResourcesVpcConfig": { "EndpointPrivateAccess": "Set this value to `true` to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is `false` , which disables private access for your Kubernetes API server. If you disable private access and you have nodes or AWS Fargate pods in the cluster, then ensure that `publicAccessCidrs` includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see [Amazon EKS cluster endpoint access control](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) in the **Amazon EKS User Guide** .", @@ -15352,14 +15577,14 @@ "SubnetIds": "Specify subnets for your Amazon EKS nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your nodes and the Kubernetes control plane." }, "AWS::EKS::Cluster StorageConfig": { - "BlockStorage": "" + "BlockStorage": "Request to configure EBS Block Storage settings for your EKS Auto Mode cluster." }, "AWS::EKS::Cluster Tag": { "Key": "One part of a key-value pair that make up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that make up a tag. A `value` acts as a descriptor within a tag category (key)." }, "AWS::EKS::Cluster UpgradePolicy": { - "SupportType": "" + "SupportType": "If the cluster is set to `EXTENDED` , it will enter extended support at the end of standard support. 
If the cluster is set to `STANDARD` , it will be automatically upgraded at the end of standard support.\n\n[Learn more about EKS Extended Support in the *Amazon EKS User Guide* .](https://docs.aws.amazon.com/eks/latest/userguide/extended-support-control.html)"
 },
 "AWS::EKS::Cluster ZonalShiftConfig": {
 "Enabled": "If zonal shift is enabled, AWS configures zonal autoshift for the cluster."
 },
@@ -15417,7 +15642,7 @@
 "InstanceTypes": "Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If, however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used by default. If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .",
 "Labels": "The Kubernetes `labels` applied to the nodes in the node group.\n\n> Only `labels` that are applied with the Amazon EKS API are shown here. There may be other Kubernetes `labels` applied to the nodes in this group.",
 "LaunchTemplate": "An object representing a node group's launch template specification. When using this object, don't directly specify `instanceTypes` , `diskSize` , or `remoteAccess` . Make sure that the launch template meets the requirements in `launchTemplateSpecification` . Also refer to [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .",
- "NodeRepairConfig": "",
+ "NodeRepairConfig": "The node auto repair configuration for the node group.",
 "NodeRole": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .",
 "NodegroupName": "The unique name to give your node group.",
 "ReleaseVersion": "The AMI version of the Amazon EKS optimized AMI to use with your node group (for example, `1.14.7- *YYYYMMDD*` ).
By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see [Amazon EKS optimized Linux AMI Versions](https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) in the *Amazon EKS User Guide* .\n\n> Changing this value triggers an update of the node group if one is available. You can't update other properties at the same time as updating `Release Version` .", @@ -15435,7 +15660,7 @@ "Version": "The version number of the launch template to use. If no version is specified, then the template's default version is used." }, "AWS::EKS::Nodegroup NodeRepairConfig": { - "Enabled": "" + "Enabled": "Specifies whether to enable node auto repair for the node group. Node auto repair is disabled by default." }, "AWS::EKS::Nodegroup RemoteAccess": { "Ec2SshKey": "The Amazon EC2 SSH key name that provides access for SSH communication with the nodes in the managed node group. For more information, see [Amazon EC2 key pairs and Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon Elastic Compute Cloud User Guide for Linux Instances* . For Windows, an Amazon EC2 SSH key is used to obtain the RDP password. For more information, see [Amazon EC2 key pairs and Windows instances](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-key-pairs.html) in the *Amazon Elastic Compute Cloud User Guide for Windows Instances* .", @@ -15931,6 +16156,7 @@ "NetworkConfiguration": "The network configuration for customer VPC connectivity for the application.", "ReleaseLabel": "The EMR release associated with the application.", "RuntimeConfiguration": "The [Configuration](https://docs.aws.amazon.com/emr-serverless/latest/APIReference/API_Configuration.html) specifications of an application. Each configuration consists of a classification and properties. You use this parameter when creating or updating an application. To see the runtimeConfiguration object of an application, run the [GetApplication](https://docs.aws.amazon.com/emr-serverless/latest/APIReference/API_GetApplication.html) API operation.", + "SchedulerConfiguration": "The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.", "Tags": "The tags assigned to the application.", "Type": "The type of application, such as Spark or Hive.", "WorkerTypeSpecifications": "The specification applied to each worker type." @@ -15995,6 +16221,10 @@ "EncryptionKeyArn": "The KMS key ARN to encrypt the logs published to the given Amazon S3 destination.", "LogUri": "The Amazon S3 destination URI for log publishing." }, + "AWS::EMRServerless::Application SchedulerConfiguration": { + "MaxConcurrentRuns": "The maximum concurrent job runs on this application. If scheduler configuration is enabled on your application, the default value is 15. The valid range is 1 to 1000.", + "QueueTimeoutMinutes": "The maximum duration in minutes for the job in QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720." + }, "AWS::EMRServerless::Application Tag": { "Key": "The key to use in the tag.", "Value": "The value of the tag." @@ -16456,6 +16686,7 @@ "Value": "The value of the attribute." 
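To make the new `SchedulerConfiguration` documentation above concrete, here is a minimal, hypothetical CloudFormation sketch of an `AWS::EMRServerless::Application` that sets both scheduler properties. The logical ID, application name, and release label are illustrative assumptions; the property names and value ranges come from the descriptions above.

```yaml
# Hypothetical sketch only; logical ID, Name, and ReleaseLabel are assumptions.
Resources:
  SparkQueueApp:
    Type: AWS::EMRServerless::Application
    Properties:
      Name: spark-queue-app        # illustrative name
      Type: Spark
      ReleaseLabel: emr-7.0.0      # scheduler configuration requires emr-7.0.0 or above
      SchedulerConfiguration:
        MaxConcurrentRuns: 15      # valid range 1-1000; default 15 when enabled
        QueueTimeoutMinutes: 360   # valid range 15-720; default 360 (6 hours)
```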
}, "AWS::ElasticLoadBalancingV2::Listener MutualAuthentication": { + "AdvertiseTrustStoreCaNames": "", "IgnoreClientCertificateExpiry": "Indicates whether expired client certificates are ignored.", "Mode": "The client certificate handling method. Options are `off` , `passthrough` or `verify` . The default value is `off` .", "TrustStoreArn": "The Amazon Resource Name (ARN) of the trust store." @@ -16648,7 +16879,7 @@ "Port": "The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. If the target type is `alb` , the targeted Application Load Balancer must have at least one listener whose port matches the target group port. This parameter is not used if the target is a Lambda function." }, "AWS::ElasticLoadBalancingV2::TargetGroup TargetGroupAttribute": { - "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. 
The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . 
The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.", + "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. 
The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. 
The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` . This attribute can't be enabled for UDP and TCP_UDP target groups.\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.",
 "Value": "The value of the attribute."
 },
 "AWS::ElasticLoadBalancingV2::TrustStore": {
@@ -16979,9 +17210,10 @@
 "SourceArn": "The ARN of the event bus that sends events to the archive."
 },
 "AWS::Events::Connection": {
- "AuthParameters": "A `CreateConnectionAuthRequestParameters` object that contains the authorization parameters to use to authorize with the endpoint.",
+ "AuthParameters": "The authorization parameters to use to authorize with the endpoint.\n\nYou must include only authorization parameters for the `AuthorizationType` you specify.",
 "AuthorizationType": "The type of authorization to use for the connection.\n\n> OAUTH tokens are refreshed when a 401 or 407 response is returned.",
 "Description": "A description for the connection to create.",
+ "InvocationConnectivityParameters": "For connections to private APIs, the parameters to use for invoking the API.\n\nFor more information, see [Connecting to private APIs](https://docs.aws.amazon.com/eventbridge/latest/userguide/connection-private.html) in the **Amazon EventBridge User Guide** .",
 "Name": "The name for the connection to create."
 },
 "AWS::Events::Connection ApiKeyAuthParameters": {
@@ -16991,6 +17223,7 @@
 "AWS::Events::Connection AuthParameters": {
 "ApiKeyAuthParameters": "The API Key parameters to use for authorization.",
 "BasicAuthParameters": "The authorization parameters for Basic authorization.",
+ "ConnectivityParameters": "For private OAuth authentication endpoints, the parameters EventBridge uses to authenticate against the endpoint.\n\nFor more information, see [Authorization methods for connections](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-target-connection-auth.html) in the **Amazon EventBridge User Guide** .",
 "InvocationHttpParameters": "Additional parameters for the connection that are passed through with every invocation to the HTTP endpoint.",
 "OAuthParameters": "The OAuth parameters to use for authorization."
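As a rough illustration of the `TargetGroupAttribute` key/value pairs documented above, the following hypothetical sketch sets a few of them on an Application Load Balancer target group. The logical ID, the `VpcId` reference, and the chosen values are assumptions; only keys listed in the description above are used.

```yaml
# Hypothetical sketch only; logical ID, VpcId reference, and values are assumptions.
Resources:
  AppTargetGroup:
    Type: AWS::ElasticLoadBalancingV2::TargetGroup
    Properties:
      Protocol: HTTP
      Port: 80
      VpcId: !Ref AppVpc           # assumes an AppVpc resource defined elsewhere
      TargetGroupAttributes:
        - Key: deregistration_delay.timeout_seconds
          Value: "120"             # range 0-3600 seconds; default 300
        - Key: stickiness.enabled
          Value: "true"            # default false
        - Key: stickiness.type
          Value: lb_cookie         # lb_cookie or app_cookie for ALBs
        - Key: load_balancing.algorithm.type
          Value: least_outstanding_requests   # ALB only; default round_robin
```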
},
@@ -17003,21 +17236,31 @@
 "ClientSecret": "The client secret associated with the client ID to use for OAuth authorization."
 },
 "AWS::Events::Connection ConnectionHttpParameters": {
- "BodyParameters": "Contains additional body string parameters for the connection.",
- "HeaderParameters": "Contains additional header parameters for the connection.",
- "QueryStringParameters": "Contains additional query string parameters for the connection."
+ "BodyParameters": "Any additional body string parameters for the connection.",
+ "HeaderParameters": "Any additional header parameters for the connection.",
+ "QueryStringParameters": "Any additional query string parameters for the connection."
+ },
+ "AWS::Events::Connection ConnectivityParameters": {
+ "ResourceParameters": "The parameters for EventBridge to use when invoking the resource endpoint."
+ },
+ "AWS::Events::Connection InvocationConnectivityParameters": {
+ "ResourceParameters": "The parameters for EventBridge to use when invoking the resource endpoint."
 },
 "AWS::Events::Connection OAuthParameters": {
 "AuthorizationEndpoint": "The URL to the authorization endpoint when OAuth is specified as the authorization type.",
- "ClientParameters": "A `CreateConnectionOAuthClientRequestParameters` object that contains the client parameters for OAuth authorization.",
+ "ClientParameters": "The client parameters for OAuth authorization.",
 "HttpMethod": "The method to use for the authorization request.",
- "OAuthHttpParameters": "A `ConnectionHttpParameters` object that contains details about the additional parameters to use for the connection."
+ "OAuthHttpParameters": "Details about the additional parameters to use for the connection."
 },
 "AWS::Events::Connection Parameter": {
 "IsValueSecret": "Specifies whether the value is secret.",
 "Key": "The key for a query string parameter.",
 "Value": "The value associated with the key for the query string parameter."
 },
+ "AWS::Events::Connection ResourceParameters": {
+ "ResourceAssociationArn": "For connections to private APIs, the Amazon Resource Name (ARN) of the resource association EventBridge created between the connection and the private API's resource configuration.\n\n> The value of this property is set by EventBridge . Any value you specify in your template is ignored.",
+ "ResourceConfigurationArn": "The Amazon Resource Name (ARN) of the Amazon VPC Lattice resource configuration for the resource endpoint."
+ },
 "AWS::Events::Endpoint": {
 "Description": "A description for the endpoint.",
 "EventBuses": "The event buses being used by the endpoint.\n\n*Exactly* : `2`",
@@ -17172,11 +17415,11 @@
 "Values": "If `Key` is `tag:` *tag-key* , `Values` is a list of tag values. If `Key` is `InstanceIds` , `Values` is a list of Amazon EC2 instance IDs."
 },
 "AWS::Events::Rule SageMakerPipelineParameter": {
- "Name": "Name of parameter to start execution of a SageMaker Model Building Pipeline.",
- "Value": "Value of parameter to start execution of a SageMaker Model Building Pipeline."
+ "Name": "Name of parameter to start execution of a SageMaker AI Model Building Pipeline.",
+ "Value": "Value of parameter to start execution of a SageMaker AI Model Building Pipeline."
 },
 "AWS::Events::Rule SageMakerPipelineParameters": {
- "PipelineParameterList": "List of Parameter names and values for SageMaker Model Building Pipeline execution."
+ "PipelineParameterList": "List of Parameter names and values for SageMaker AI Model Building Pipeline execution."
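The private-API connectivity properties documented above fit together roughly as in this hypothetical sketch. `InvocationConnectivityParameters` , `ResourceParameters` , and `ResourceConfigurationArn` come from the descriptions above, while the logical ID, the API-key subkeys, and the ARN are illustrative assumptions; `ResourceAssociationArn` is omitted because EventBridge sets it and ignores template values.

```yaml
# Hypothetical sketch only; auth subkeys, names, and the ARN are assumptions.
Resources:
  PrivateApiConnection:
    Type: AWS::Events::Connection
    Properties:
      Name: private-api-connection   # illustrative name
      AuthorizationType: API_KEY
      AuthParameters:
        ApiKeyAuthParameters:        # subkeys assumed for API_KEY authorization
          ApiKeyName: x-api-key
          ApiKeyValue: '{{resolve:secretsmanager:my-api-key-secret}}'  # illustrative
      InvocationConnectivityParameters:
        ResourceParameters:
          # ARN of an existing Amazon VPC Lattice resource configuration (illustrative)
          ResourceConfigurationArn: arn:aws:vpc-lattice:us-east-1:123456789012:resourceconfiguration/rcfg-0123456789abcdef0
```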
}, "AWS::Events::Rule SqsParameters": { "MessageGroupId": "The FIFO message group ID to use as the target." @@ -17198,10 +17441,10 @@ "InputTransformer": "Settings to enable you to provide custom input to a target based on certain event data. You can extract one or more key-value pairs from the event and then use that data to send customized input to the target.", "KinesisParameters": "The custom parameter you can use to control the shard assignment, when the target is a Kinesis data stream. If you do not include this parameter, the default is to use the `eventId` as the partition key.", "RedshiftDataParameters": "Contains the Amazon Redshift Data API parameters to use when the target is a Amazon Redshift cluster.\n\nIf you specify a Amazon Redshift Cluster as a Target, you can use this to specify parameters to invoke the Amazon Redshift Data API ExecuteStatement based on EventBridge events.", - "RetryPolicy": "The `RetryPolicy` object that contains the retry policy configuration to use for the dead-letter queue.", + "RetryPolicy": "The retry policy configuration to use for the dead-letter queue.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. If one rule triggers multiple targets, you can use a different IAM role for each target.", "RunCommandParameters": "Parameters used when you are using the rule to invoke Amazon EC2 Run Command.", - "SageMakerPipelineParameters": "Contains the SageMaker Model Building Pipeline parameters to start execution of a SageMaker Model Building Pipeline.\n\nIf you specify a SageMaker Model Building Pipeline as a target, you can use this to specify parameters to start a pipeline execution based on EventBridge events.", + "SageMakerPipelineParameters": "Contains the SageMaker AI Model Building Pipeline parameters to start execution of a SageMaker AI Model Building Pipeline.\n\nIf you specify a SageMaker AI Model Building Pipeline as a target, you can use this to specify parameters to start a pipeline execution based on EventBridge events.", "SqsParameters": "Contains the message group ID to use when the target is a FIFO queue.\n\nIf you specify an SQS FIFO queue as a target, the queue must have content-based deduplication enabled." }, "AWS::Evidently::Experiment": { @@ -17517,7 +17760,7 @@ }, "AWS::FSx::DataRepositoryAssociation": { "BatchImportMetaDataOnCreate": "A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to `true` .", - "DataRepositoryPath": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://bucket-name/prefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", + "DataRepositoryPath": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://myBucket/myPrefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", "FileSystemId": "The ID of the file system on which the data repository association is configured.", "FileSystemPath": "A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as `/ns1/` ) or subdirectory (such as `/ns1/subdir/` ) that will be mapped 1-1 with `DataRepositoryPath` . The leading forward slash in the name is required. 
Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/` , then you cannot link another data repository with file system path `/ns1/ns2` .\n\nThis path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.\n\n> If you specify only a forward slash ( `/` ) as the file system path, you can link only one data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.", "ImportedFileChunkSize": "For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.\n\nThe default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.", @@ -17548,7 +17791,7 @@ "OpenZFSConfiguration": "The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.", "SecurityGroupIds": "A list of IDs specifying the security groups to apply to all network interfaces created for file system access. This list isn't returned in later requests to describe the file system.\n\n> You must specify a security group if you are creating a Multi-AZ FSx for ONTAP file system in a VPC subnet that has been shared with you.", "StorageCapacity": "Sets the storage capacity of the file system that you're creating.\n\n`StorageCapacity` is required if you are creating a new file system. It is not required if you are creating a file system by restoring a backup.\n\n*FSx for Lustre file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` and the Lustre `DeploymentType` , as follows:\n\n- For `SCRATCH_2` , `PERSISTENT_2` and `PERSISTENT_1` deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.\n- For `PERSISTENT_1` HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.\n- For `SCRATCH_1` deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.\n\n*FSx for ONTAP file systems* - The amount of SSD storage capacity that you can configure depends on the value of the `HAPairs` property. The minimum value is calculated as 1,024 GiB * HAPairs and the maximum is calculated as 524,288 GiB * HAPairs, up to a maximum amount of SSD storage capacity of 1,048,576 GiB (1 pebibyte).\n\n*FSx for OpenZFS file systems* - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). If you are creating a file system from a backup, you can specify a storage capacity equal to or greater than the original file system's storage capacity.\n\n*FSx for Windows File Server file systems* - The amount of storage capacity that you can configure depends on the value that you set for `StorageType` as follows:\n\n- For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).\n- For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).", - "StorageType": "Sets the storage type for the file system that you're creating. 
Valid values are `SSD` and `HDD` .\n\n- Set to `SSD` to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.\n- Set to `HDD` to use hard disk drive storage. HDD is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types, and on `PERSISTENT_1` Lustre file system deployment types.\n\nDefault value is `SSD` . For more information, see [Storage type options](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) in the *FSx for Windows File Server User Guide* and [Multiple storage options](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) in the *FSx for Lustre User Guide* .",
+ "StorageType": "Sets the storage class for the file system that you're creating. Valid values are `SSD` , `HDD` , and `INTELLIGENT_TIERING` .\n\n- Set to `SSD` to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.\n- Set to `HDD` to use hard disk drive storage. HDD is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types, and on `PERSISTENT_1` Lustre file system deployment types.\n- Set to `INTELLIGENT_TIERING` to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type.\n\nDefault value is `SSD` . For more information, see [Storage type options](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) in the *FSx for Windows File Server User Guide* , [Multiple storage options](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) in the *FSx for Lustre User Guide* , and [Working with Intelligent-Tiering](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance-intelligent-tiering) in the *Amazon FSx for OpenZFS User Guide* .",
 "SubnetIds": "Specifies the IDs of the subnets that the file system will be accessible from. For Windows and ONTAP `MULTI_AZ_1` deployment types, provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the `WindowsConfiguration > PreferredSubnetID` or `OntapConfiguration > PreferredSubnetID` properties. For more information about Multi-AZ file system configuration, see [Availability and durability: Single-AZ and Multi-AZ file systems](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html) in the *Amazon FSx for Windows User Guide* and [Availability and durability](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-multiAZ.html) in the *Amazon FSx for ONTAP User Guide* .\n\nFor Windows `SINGLE_AZ_1` and `SINGLE_AZ_2` and all Lustre deployment types, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.",
 "Tags": "The tags to associate with the file system. For more information, see [Tagging your Amazon FSx resources](https://docs.aws.amazon.com/fsx/latest/LustreGuide/tag-resources.html) in the *Amazon FSx for Lustre User Guide* .",
 "WindowsConfiguration": "The configuration object for the Microsoft Windows file system you are creating.\n\nThis value is required if `FileSystemType` is set to `WINDOWS` ."
@@ -17574,6 +17817,7 @@
 "DataCompressionType": "Sets the data compression configuration for the file system.
`DataCompressionType` can have the following values:\n\n- `NONE` - (Default) Data compression is turned off when the file system is created.\n- `LZ4` - Data compression is turned on with the LZ4 algorithm.\n\nFor more information, see [Lustre data compression](https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-compression.html) in the *Amazon FSx for Lustre User Guide* .", "DeploymentType": "(Optional) Choose `SCRATCH_1` and `SCRATCH_2` deployment types when you need temporary storage and shorter-term processing of data. The `SCRATCH_2` deployment type provides in-transit encryption of data and higher burst throughput capacity than `SCRATCH_1` .\n\nChoose `PERSISTENT_1` for longer-term storage and for throughput-focused workloads that aren\u2019t latency-sensitive. `PERSISTENT_1` supports encryption of data in transit, and is available in all AWS Regions in which FSx for Lustre is available.\n\nChoose `PERSISTENT_2` for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. `PERSISTENT_2` supports SSD storage, and offers higher `PerUnitStorageThroughput` (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration mode for `PERSISTENT_2` which supports increasing metadata performance. `PERSISTENT_2` is available in a limited number of AWS Regions . For more information, and an up-to-date list of AWS Regions in which `PERSISTENT_2` is available, see [File system deployment options for FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-fsx-lustre.html#lustre-deployment-types) in the *Amazon FSx for Lustre User Guide* .\n\n> If you choose `PERSISTENT_2` , and you set `FileSystemTypeVersion` to `2.10` , the `CreateFileSystem` operation fails. \n\nEncryption of data in transit is automatically turned on when you access `SCRATCH_2` , `PERSISTENT_1` , and `PERSISTENT_2` file systems from Amazon EC2 instances that support automatic encryption in the AWS Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see [Encrypting data in transit](https://docs.aws.amazon.com/fsx/latest/LustreGuide/encryption-in-transit-fsxl.html) in the *Amazon FSx for Lustre User Guide* .\n\n(Default = `SCRATCH_1` )", "DriveCacheType": "The type of drive cache used by `PERSISTENT_1` file systems that are provisioned with HDD storage devices. This parameter is required when storage type is HDD. Set this property to `READ` to improve the performance for frequently accessed files by caching up to 20% of the total storage capacity of the file system.\n\nThis parameter is required when `StorageType` is set to `HDD` and `DeploymentType` is `PERSISTENT_1` .", + "EfaEnabled": "", "ExportPath": "(Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an `ExportPath` value is not provided, Amazon FSx sets a default export path, `s3://import-bucket/FSxLustre[creation-timestamp]` . The timestamp is in UTC format, for example `s3://import-bucket/FSxLustre20181105T222312Z` .\n\nThe Amazon S3 export bucket must be the same as the import bucket specified by `ImportPath` . If you specify only a bucket name, such as `s3://import-bucket` , you get a 1:1 mapping of file system objects to S3 bucket objects. 
This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as `s3://import-bucket/[custom-optional-prefix]` , Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket.\n\n> This parameter is not supported for file systems with a data repository association.", "ImportPath": "(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is `s3://import-bucket/optional-prefix` . If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.\n\n> This parameter is not supported for Lustre file systems with a data repository association.", "ImportedFileChunkSize": "(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.\n\nThe default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.\n\n> This parameter is not supported for Lustre file systems with a data repository association.", @@ -17612,11 +17856,16 @@ "EndpointIpAddressRange": "(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.", "Options": "To delete a file system if there are child volumes present below the root volume, use the string `DELETE_CHILD_VOLUMES_AND_SNAPSHOTS` . If your file system has child volumes and you don't use this option, the delete request will fail.", "PreferredSubnetId": "Required when `DeploymentType` is set to `MULTI_AZ_1` . This specifies the subnet in which you want the preferred file server to be located.", + "ReadCacheConfiguration": "Specifies the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.", "RootVolumeConfiguration": "The configuration Amazon FSx uses when creating the root value of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume.", "RouteTableIds": "(Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.", "ThroughputCapacity": "Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). 
Valid values depend on the DeploymentType you choose, as follows:\n\n- For `MULTI_AZ_1` and `SINGLE_AZ_2` , valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.\n- For `SINGLE_AZ_1` , valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.\n\nYou pay for additional throughput capacity that you provision.", "WeeklyMaintenanceStartTime": "A recurring weekly time, in the format `D:HH:MM` .\n\n`D` is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see [the ISO-8601 spec as described on Wikipedia](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_week_date) .\n\n`HH` is the zero-padded hour of the day (0-23), and `MM` is the zero-padded minute of the hour.\n\nFor example, `1:05:00` specifies maintenance at 5 AM Monday." }, + "AWS::FSx::FileSystem ReadCacheConfiguration": { + "SizeGiB": "Required if `SizingMode` is set to `USER_PROVISIONED` . Specifies the size of the file system's SSD read cache, in gibibytes (GiB).", + "SizingMode": "Specifies how the provisioned SSD read cache is sized, as follows:\n\n- Set to `NO_CACHE` if you do not want to use an SSD read cache with your Intelligent-Tiering file system.\n- Set to `USER_PROVISIONED` to specify the exact size of your SSD read cache.\n- Set to `PROPORTIONAL_TO_THROUGHPUT_CAPACITY` to have your SSD read cache automatically sized based on your throughput capacity." + }, "AWS::FSx::FileSystem RootVolumeConfiguration": { "CopyTagsToSnapshots": "A Boolean value indicating whether tags for the volume should be copied to snapshots of the volume. This value defaults to `false` . If it's set to `true` , all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is `true` and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.", "DataCompressionType": "Specifies the method used to compress the data on the volume. The compression type is `NONE` by default.\n\n- `NONE` - Doesn't compress the data on the volume. `NONE` is the default.\n- `ZSTD` - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.\n- `LZ4` - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.", @@ -17735,7 +17984,7 @@ "OriginSnapshot": "The configuration object that specifies the snapshot to use as the origin of the data for the volume.", "ParentVolumeId": "The ID of the volume to use as the parent volume of the volume that you are creating.", "ReadOnly": "A Boolean value indicating whether the volume is read-only.", - "RecordSizeKiB": "Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). 
For additional guidance on when to set a custom record size, see [ZFS Record size](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance) in the *Amazon FSx for OpenZFS User Guide* .", + "RecordSizeKiB": "Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). For file systems using the Intelligent-Tiering storage class, valid values are 128, 256, 512, 1024, 2048, or 4096 KiB, with a default of 1024 KiB. For all other file systems, valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB, with a default of 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see [ZFS Record size](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance) in the *Amazon FSx for OpenZFS User Guide* .", "StorageCapacityQuotaGiB": "Sets the maximum storage size in gibibytes (GiB) for the volume. You can specify a quota that is larger than the storage on the parent volume. A volume quota limits the amount of storage that the volume can consume to the configured amount, but does not guarantee the space will be available on the parent volume. To guarantee quota space, you must also set `StorageCapacityReservationGiB` . To *not* specify a storage capacity quota, set this to `-1` .\n\nFor more information, see [Volume properties](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/managing-volumes.html#volume-properties) in the *Amazon FSx for OpenZFS User Guide* .", "StorageCapacityReservationGiB": "Specifies the amount of storage in gibibytes (GiB) to reserve from the parent volume. Setting `StorageCapacityReservationGiB` guarantees that the specified amount of storage space on the parent volume will always be available for the volume. You can't reserve more storage than the parent volume has. To *not* specify a storage capacity reservation, set this to `0` or `-1` . For more information, see [Volume properties](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/managing-volumes.html#volume-properties) in the *Amazon FSx for OpenZFS User Guide* .", "UserAndGroupQuotas": "Configures how much storage users and groups can use on the volume." @@ -18474,7 +18723,7 @@ }, "AWS::Glue::Connection ConnectionInput": { "ConnectionProperties": "These key-value pairs define parameters for the connection.", - "ConnectionType": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . 
These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "ConnectionType": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . 
These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. 
Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\nAdditionally, a `ConnectionType` for the following SaaS connectors is supported:\n\n- `FACEBOOKADS` - Designates a connection to Facebook Ads.\n- `GOOGLEADS` - Designates a connection to Google Ads.\n- `GOOGLESHEETS` - Designates a connection to Google Sheets.\n- `GOOGLEANALYTICS4` - Designates a connection to Google Analytics 4.\n- `HUBSPOT` - Designates a connection to HubSpot.\n- `INSTAGRAMADS` - Designates a connection to Instagram Ads.\n- `INTERCOM` - Designates a connection to Intercom.\n- `JIRACLOUD` - Designates a connection to Jira Cloud.\n- `MARKETO` - Designates a connection to Adobe Marketo Engage.\n- `NETSUITEERP` - Designates a connection to Oracle NetSuite.\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n- `SALESFORCEMARKETINGCLOUD` - Designates a connection to Salesforce Marketing Cloud.\n- `SALESFORCEPARDOT` - Designates a connection to Salesforce Marketing Cloud Account Engagement (MCAE).\n- `SAPODATA` - Designates a connection to SAP OData.\n- `SERVICENOW` - Designates a connection to ServiceNow.\n- `SLACK` - Designates a connection to Slack.\n- `SNAPCHATADS` - Designates a connection to Snapchat Ads.\n- `STRIPE` - Designates a connection to Stripe.\n- `ZENDESK` - Designates a connection to Zendesk.\n- `ZOHOCRM` - Designates a connection to Zoho CRM.\n\nFor more information on the connection parameters needed for a particular connector, see the documentation for the connector in [Adding an AWS Glue connection](https://docs.aws.amazon.com/glue/latest/dg/console-connections.html) in the AWS Glue User Guide.\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", "Description": "The description of the connection.", "MatchCriteria": "A list of criteria that can be used in selecting this connection.", "Name": "The name of the connection.", @@ -18669,7 +18918,7 @@ "SecurityConfiguration": "The name of the `SecurityConfiguration` structure to be used with this job.", "Tags": "The tags to use with this job.", "Timeout": "The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).", - "WorkerType": "The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. 
Accepts the value Z.2X for Ray jobs.\n\n- For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.\n- For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.\n- For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).\n- For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the `G.4X` worker type.\n- For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 streaming jobs.\n- For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler."
+ "WorkerType": "The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.\n\n- For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.\n- For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.\n- For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N.
Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).\n- For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the `G.4X` worker type.\n- For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs.\n- For the `Z.2X` worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler."
 },
 "AWS::Glue::Job ConnectionsList": {
 "Connections": "A list of connections used by the job."
 },
@@ -19278,7 +19527,7 @@
 "LocalDeviceResourceData": "Settings for a local device resource.",
 "LocalVolumeResourceData": "Settings for a local volume resource.",
 "S3MachineLearningModelResourceData": "Settings for a machine learning resource stored in Amazon S3 .",
- "SageMakerMachineLearningModelResourceData": "Settings for a machine learning resource saved as an SageMaker training job.",
+ "SageMakerMachineLearningModelResourceData": "Settings for a machine learning resource saved as a SageMaker AI training job.",
 "SecretsManagerSecretResourceData": "Settings for a secret resource."
 },
 "AWS::Greengrass::ResourceDefinition ResourceDefinitionVersion": {
@@ -19301,7 +19550,7 @@
 "AWS::Greengrass::ResourceDefinition SageMakerMachineLearningModelResourceData": {
 "DestinationPath": "The absolute local path of the resource inside the Lambda environment.",
 "OwnerSetting": "The owner setting for the downloaded machine learning resource. For more information, see [Access Machine Learning Resources from Lambda Functions](https://docs.aws.amazon.com/greengrass/v1/developerguide/access-ml-resources.html) in the *Developer Guide* .",
- "SageMakerJobArn": "The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model."
+ "SageMakerJobArn": "The Amazon Resource Name (ARN) of the Amazon SageMaker AI training job that represents the source model."
 },
 "AWS::Greengrass::ResourceDefinition SecretsManagerSecretResourceData": {
 "ARN": "The Amazon Resource Name (ARN) of the Secrets Manager secret to make available on the core. The value of the secret's latest version (represented by the `AWSCURRENT` staging label) is included by default.",
@@ -19328,7 +19577,7 @@
 "LocalDeviceResourceData": "Settings for a local device resource.",
 "LocalVolumeResourceData": "Settings for a local volume resource.",
 "S3MachineLearningModelResourceData": "Settings for a machine learning resource stored in Amazon S3 .",
- "SageMakerMachineLearningModelResourceData": "Settings for a machine learning resource saved as an SageMaker training job.",
+ "SageMakerMachineLearningModelResourceData": "Settings for a machine learning resource saved as a SageMaker AI training job.",
 "SecretsManagerSecretResourceData": "Settings for a secret resource."
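The Greengrass entries above reference a SageMaker AI training job by ARN as a machine learning resource. The following is a minimal, hypothetical template sketch of how `SageMakerMachineLearningModelResourceData` sits inside a resource definition; the resource names, training-job ARN, and destination path are placeholders, not values taken from the schema.

```yaml
# Minimal sketch (placeholder names and ARN): a Greengrass resource
# definition whose initial version downloads a SageMaker AI training
# job's model artifact to a local path on the core.
Resources:
  ExampleMLResourceDefinition:
    Type: AWS::Greengrass::ResourceDefinition
    Properties:
      Name: ExampleMLResources
      InitialVersion:
        Resources:
          - Id: SageMakerModel
            Name: ExampleModel
            ResourceDataContainer:
              SageMakerMachineLearningModelResourceData:
                # Placeholder ARN; substitute a real training job ARN.
                SageMakerJobArn: arn:aws:sagemaker:us-east-1:123456789012:training-job/example-job
                # Absolute local path inside the Lambda environment.
                DestinationPath: /ml/model
```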
}, "AWS::Greengrass::ResourceDefinitionVersion ResourceDownloadOwnerSetting": { @@ -19348,7 +19597,7 @@ "AWS::Greengrass::ResourceDefinitionVersion SageMakerMachineLearningModelResourceData": { "DestinationPath": "The absolute local path of the resource inside the Lambda environment.", "OwnerSetting": "The owner setting for the downloaded machine learning resource. For more information, see [Access Machine Learning Resources from Lambda Functions](https://docs.aws.amazon.com/greengrass/v1/developerguide/access-ml-resources.html) in the *Developer Guide* .", - "SageMakerJobArn": "The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model." + "SageMakerJobArn": "The Amazon Resource Name (ARN) of the Amazon SageMaker AI training job that represents the source model." }, "AWS::Greengrass::ResourceDefinitionVersion SecretsManagerSecretResourceData": { "ARN": "The Amazon Resource Name (ARN) of the Secrets Manager secret to make available on the core. The value of the secret's latest version (represented by the `AWSCURRENT` staging label) is included by default.", @@ -20209,7 +20458,7 @@ "Image": "The AMI ID to use as the base image for a container build and test instance. If not specified, Image Builder will use the appropriate ECS-optimized AMI as a base image." }, "AWS::ImageBuilder::ContainerRecipe TargetContainerRepository": { - "RepositoryName": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location.", + "RepositoryName": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location. For example, `/repository_name` .", "Service": "Specifies the service in which this image was registered." }, "AWS::ImageBuilder::DistributionConfiguration": { @@ -20266,7 +20515,7 @@ "SetDefaultVersion": "Set the specified Amazon EC2 launch template as the default launch template for the specified account." }, "AWS::ImageBuilder::DistributionConfiguration TargetContainerRepository": { - "RepositoryName": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location.", + "RepositoryName": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location. For example, `/repository_name` .", "Service": "Specifies the service in which this image was registered." }, "AWS::ImageBuilder::Image": { @@ -20291,7 +20540,7 @@ }, "AWS::ImageBuilder::Image ImageTestsConfiguration": { "ImageTestsEnabled": "Determines if tests should run after building the image. Image Builder defaults to enable tests to run following the image build, before image distribution.", - "TimeoutMinutes": "The maximum time in minutes that tests are permitted to run.\n\n> The timeoutMinutes attribute is not currently active. This value is ignored." + "TimeoutMinutes": "The maximum time in minutes that tests are permitted to run.\n\n> The timeout attribute is not currently active. This value is ignored." }, "AWS::ImageBuilder::Image WorkflowConfiguration": { "OnFailure": "The action to take if the workflow fails.", @@ -20329,7 +20578,7 @@ }, "AWS::ImageBuilder::ImagePipeline ImageTestsConfiguration": { "ImageTestsEnabled": "Defines if tests should be executed when building this image. 
For example, `true` or `false` .", - "TimeoutMinutes": "The maximum time in minutes that tests are permitted to run.\n\n> The timeoutMinutes attribute is not currently active. This value is ignored." + "TimeoutMinutes": "The maximum time in minutes that tests are permitted to run.\n\n> The timeout attribute is not currently active. This value is ignored." }, "AWS::ImageBuilder::ImagePipeline Schedule": { "PipelineExecutionStartCondition": "The condition configures when the pipeline should trigger a new image build. When the `pipelineExecutionStartCondition` is set to `EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE` , and you use semantic version filters on the base image or components in your image recipe, Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to `EXPRESSION_MATCH_ONLY` , it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see [CreateComponent](https://docs.aws.amazon.com/imagebuilder/latest/APIReference/API_CreateComponent.html) in the *Image Builder API Reference* .", @@ -20607,7 +20856,7 @@ "Value": "The value to filter on." }, "AWS::InternetMonitor::Monitor": { - "HealthEventsConfig": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores,, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .", + "HealthEventsConfig": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Internet Monitor creates a health event. 
You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .",
 "IncludeLinkedAccounts": "A boolean option that you can set to `TRUE` to include monitors for linked accounts in a list of monitors, when you've set up cross-account sharing in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.",
 "InternetMeasurementsLogDelivery": "Publish internet measurements for a monitor for all city-networks (up to the 500,000 service limit) to another location, such as an Amazon S3 bucket. Measurements are also published to Amazon CloudWatch Logs for the first 500 (by traffic volume) city-networks (client locations and ASNs, typically internet service providers or ISPs).",
 "LinkedAccountId": "The account ID for an account that you've set up cross-account sharing for in Internet Monitor. You configure cross-account sharing by using Amazon CloudWatch Observability Access Manager. For more information, see [Internet Monitor cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cwim-cross-account.html) in the Amazon CloudWatch User Guide.",
@@ -20643,6 +20892,21 @@
 "Key": "",
 "Value": ""
 },
+ "AWS::Invoicing::InvoiceUnit": {
+ "Description": "The assigned description for an invoice unit. This information can't be modified or deleted.",
+ "InvoiceReceiver": "The account that receives invoices related to the invoice unit.",
+ "Name": "A unique name that is distinctive within your AWS account.",
+ "ResourceTags": "The tag structure that contains a tag key and value.",
+ "Rule": "An `InvoiceUnitRule` object used to categorize invoice units.",
+ "TaxInheritanceDisabled": "Whether tax inheritance for the invoice unit is or should be enabled or disabled."
+ },
+ "AWS::Invoicing::InvoiceUnit ResourceTag": {
+ "Key": "The object key of your resource tag.",
+ "Value": "The specific value of the resource tag."
+ },
+ "AWS::Invoicing::InvoiceUnit Rule": {
+ "LinkedAccounts": "The list of `LINKED_ACCOUNT` IDs where charges are included within the invoice unit."
+ },
 "AWS::IoT1Click::Device": {
 "DeviceId": "The ID of the device, such as `G030PX0312744DWM` .",
 "Enabled": "A Boolean value indicating whether the device is enabled ( `true` ) or not ( `false` )."
 },
@@ -20759,6 +21023,43 @@
 "Key": "The tag's key.",
 "Value": "The tag's value."
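To make the new `AWS::Invoicing::InvoiceUnit` entries above concrete, here is a minimal, hypothetical template sketch; the account IDs, name, and tag values are placeholders and not part of the schema.

```yaml
# Minimal sketch (placeholder account IDs and names) of an invoice unit
# that groups charges from two linked accounts under a receiver account.
Resources:
  ExampleInvoiceUnit:
    Type: AWS::Invoicing::InvoiceUnit
    Properties:
      Name: ExampleBusinessUnit
      Description: Invoice unit for the example business unit
      InvoiceReceiver: "111122223333"   # account that receives the invoices
      TaxInheritanceDisabled: false
      Rule:
        LinkedAccounts:                 # LINKED_ACCOUNT IDs covered by this unit
          - "222233334444"
          - "333344445555"
      ResourceTags:
        - Key: CostCenter
          Value: Example
```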
},
+ "AWS::IoT::Command": {
+ "CommandId": "The unique identifier of the command.",
+ "CreatedAt": "The timestamp when the command was created.",
+ "Deprecated": "Indicates whether the command has been deprecated.",
+ "Description": "The description of the command.",
+ "DisplayName": "The display name of the command.",
+ "LastUpdatedAt": "The timestamp when the command was last updated.",
+ "MandatoryParameters": "",
+ "Namespace": "",
+ "Payload": "",
+ "PendingDeletion": "Indicates whether the command is pending deletion.",
+ "RoleArn": "",
+ "Tags": ""
+ },
+ "AWS::IoT::Command CommandParameter": {
+ "DefaultValue": "",
+ "Description": "",
+ "Name": "",
+ "Value": ""
+ },
+ "AWS::IoT::Command CommandParameterValue": {
+ "B": "",
+ "BIN": "",
+ "D": "",
+ "I": "",
+ "L": "",
+ "S": "",
+ "UL": ""
+ },
+ "AWS::IoT::Command CommandPayload": {
+ "Content": "",
+ "ContentType": ""
+ },
+ "AWS::IoT::Command Tag": {
+ "Key": "The tag's key.",
+ "Value": "The tag's value."
+ },
 "AWS::IoT::CustomMetric": {
 "DisplayName": "The friendly name in the console for the custom metric. This name doesn't have to be unique. Don't use this name as the metric identifier in the device metric report. You can update the friendly name after you define it.",
 "MetricName": "The name of the custom metric. This will be used in the metric report submitted from the device/thing. The name can't begin with `aws:` . You can\u2019t change the name after you define it.",
@@ -20803,8 +21104,8 @@
 },
 "AWS::IoT::DomainConfiguration ServerCertificateConfig": {
 "EnableOCSPCheck": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.",
- "OcspAuthorizedResponderArn": "",
- "OcspLambdaArn": ""
+ "OcspAuthorizedResponderArn": "The Amazon Resource Name (ARN) for an X.509 certificate stored in ACM. If provided, AWS IoT Core will use this certificate to validate the signature of the received OCSP response. The OCSP responder must sign responses using either this authorized responder certificate or the issuing certificate, depending on whether the ARN is provided or not. The certificate must be in the same account and region as the domain configuration.",
+ "OcspLambdaArn": "The Amazon Resource Name (ARN) for a Lambda function that acts as a Request for Comments (RFC) 6960-compliant Online Certificate Status Protocol (OCSP) responder, supporting basic OCSP responses. The Lambda function accepts a base64 encoding of the OCSP request in the Distinguished Encoding Rules (DER) format. The Lambda function's response is also a base64-encoded OCSP response in the DER format. The response size must not exceed 4 kilobytes (KiB). The Lambda function must be in the same account and region as the domain configuration."
 },
 "AWS::IoT::DomainConfiguration ServerCertificateSummary": {
 "ServerCertificateArn": "The ARN of the server certificate.",
@@ -23408,7 +23709,7 @@
 "CapacityUnits": "Specifies additional capacity units configured for your Enterprise Edition index. You can add and remove capacity units to fit your usage requirements.",
 "Description": "A description for the index.",
 "DocumentMetadataConfigurations": "Specifies the properties of an index field. You can add either a custom or a built-in field. You can add and remove built-in fields at any time.
When a built-in field is removed, its configuration reverts to the default for the field. Custom fields can't be removed from an index after they are added.",
- "Edition": "Indicates whether the index is a Enterprise Edition index or a Developer Edition index. Valid values are `DEVELOPER_EDITION` and `ENTERPRISE_EDITION` .",
+ "Edition": "Indicates whether the index is an Enterprise Edition index, a Developer Edition index, or a GenAI Enterprise Edition index.",
 "Name": "The name of the index.",
 "RoleArn": "An IAM role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role used when you use the [BatchPutDocument](https://docs.aws.amazon.com/kendra/latest/dg/BatchPutDocument.html) operation to index documents from an Amazon S3 bucket.",
 "ServerSideEncryptionConfiguration": "The identifier of the AWS KMS customer managed key (CMK) to use to encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs.",
@@ -25680,45 +25981,45 @@
 "TrackerName": "The name for the tracker resource.\n\nRequirements:\n\n- Contain only alphanumeric characters (A-Z, a-z, 0-9) , hyphens (-), periods (.), and underscores (_).\n- Must be a unique tracker resource name.\n- No spaces allowed. For example, `ExampleTracker` ."
 },
 "AWS::Logs::AccountPolicy": {
- "PolicyDocument": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with an `Deidentify` action. The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to.
Supported destinations are:\n\n- An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- An Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.",
+ "PolicyDocument": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with a `Deidentify` action. The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to.
Supported destinations are:\n\n- A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- A Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.\n\n*Field index policy*\n\nA field index filter policy can include the following attribute in a JSON block:\n\n- *Fields* The array of field indexes to create.\n\nThe following is an example of an index policy document that creates two indexes, `RequestId` and `TransactionId` .\n\n`\"policyDocument\": \"{ \\\"Fields\\\": [ \\\"RequestId\\\", \\\"TransactionId\\\" ] }\"`\n\n*Transformer policy*\n\nA transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see [Processors that you can use](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-Processors) .",
 "PolicyName": "A name for the policy. This must be unique within the account.",
 "PolicyType": "The type of policy that you're creating or updating.",
- "Scope": "Currently the only valid value for this parameter is `ALL` , which specifies that the policy applies to all log groups in the account. If you omit this parameter, the default of `ALL` is used. To scope down a subscription filter policy to a subset of log groups, use the `selectionCriteria` parameter.",
- "SelectionCriteria": "Use this parameter to apply a subscription filter policy to a subset of log groups in the account. Currently, the only supported filter is `LogGroupName NOT IN []` . The `selectionCriteria` string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.\n\nUsing the `selectionCriteria` parameter is useful to help prevent infinite loops. For more information, see [Log recursion prevention](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions-recursion-prevention.html) .\n\nSpecifing `selectionCriteria` is valid only when you specify `SUBSCRIPTION_FILTER_POLICY` for `policyType` ."
+ "Scope": "Currently the only valid value for this parameter is `ALL` , which specifies that the policy applies to all log groups in the account. If you omit this parameter, the default of `ALL` is used.
To scope down a subscription filter policy to a subset of log groups, use the `SelectionCriteria` parameter.", + "SelectionCriteria": "Use this parameter to apply the new policy to a subset of log groups in the account.\n\nYou need to specify `SelectionCriteria` only when you specify `SUBSCRIPTION_FILTER_POLICY` , `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY` for `PolicyType` .\n\nIf `PolicyType` is `SUBSCRIPTION_FILTER_POLICY` , the only supported `SelectionCriteria` filter is `LogGroupName NOT IN []`\n\nIf `PolicyType` is `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY` , the only supported `SelectionCriteria` filter is `LogGroupNamePrefix`\n\nThe `SelectionCriteria` string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.\n\nUsing the `SelectionCriteria` parameter with `SUBSCRIPTION_FILTER_POLICY` is useful to help prevent infinite loops. For more information, see [Log recursion prevention](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions-recursion-prevention.html) ." }, "AWS::Logs::Delivery": { "DeliveryDestinationArn": "The ARN of the delivery destination that is associated with this delivery.", "DeliverySourceName": "The name of the delivery source that is associated with this delivery.", "FieldDelimiter": "The field delimiter that is used between record fields when the final output format of a delivery is in `Plain` , `W3C` , or `Raw` format.", - "RecordFields": "The record fields used in this delivery.", - "S3EnableHiveCompatiblePath": "", - "S3SuffixPath": "", - "Tags": "The tags that have been assigned to this delivery." + "RecordFields": "The list of record fields to be delivered to the destination, in order. If the delivery's log source has mandatory fields, they must be included in this list.", + "S3EnableHiveCompatiblePath": "Use this parameter to cause the S3 objects that contain delivered logs to use a prefix structure that allows for integration with Apache Hive.", + "S3SuffixPath": "Use this to reconfigure the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. To find the values supported for the suffix path for each log source, use the [DescribeConfigurationTemplates](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeConfigurationTemplates.html) operation and check the `allowedSuffixPathFields` field in the response.", + "Tags": "An array of key-value pairs to apply to the delivery.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::Logs::Delivery Tag": { - "Key": "", - "Value": "" + "Key": "The key of this key-value pair.", + "Value": "The value of this key-value pair." }, "AWS::Logs::DeliveryDestination": { - "DeliveryDestinationPolicy": "A structure that contains information about one delivery destination policy.", - "DestinationResourceArn": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.", + "DeliveryDestinationPolicy": "An IAM policy that grants permissions to CloudWatch Logs to deliver logs cross-account to a specified destination in this account. 
For examples of this policy, see [Examples](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html#API_PutDeliveryDestinationPolicy_Examples) in the CloudWatch Logs API Reference.", + "DestinationResourceArn": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs , an Amazon S3 bucket, or a Firehose stream.", "Name": "The name of this delivery destination.", "OutputFormat": "The format of the logs that are sent to this delivery destination.", - "Tags": "The tags that have been assigned to this delivery destination." + "Tags": "An array of key-value pairs to apply to the delivery destination.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::Logs::DeliveryDestination Tag": { - "Key": "", - "Value": "" + "Key": "The key of this key-value pair.", + "Value": "The value of this key-value pair." }, "AWS::Logs::DeliverySource": { "LogType": "The type of log that the source is sending. For valid values for this parameter, see the documentation for the source service.", "Name": "The unique name of the delivery source.", - "ResourceArn": "", - "Tags": "The tags that have been assigned to this delivery source." + "ResourceArn": "The ARN of the AWS resource that is generating and sending logs. For example, `arn:aws:workmail:us-east-1:123456789012:organization/m-1234EXAMPLEabcd1234abcd1234abcd1234`", + "Tags": "An array of key-value pairs to apply to the delivery source.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::Logs::DeliverySource Tag": { "Key": "", - "Value": "" + "Value": "The value of this key-value pair." }, "AWS::Logs::Destination": { "DestinationName": "The name of the destination.", @@ -25726,6 +26027,21 @@ "RoleArn": "The ARN of an IAM role that permits CloudWatch Logs to send data to the specified AWS resource.", "TargetArn": "The Amazon Resource Name (ARN) of the physical target where the log events are delivered (for example, a Kinesis stream)." }, + "AWS::Logs::Integration": { + "IntegrationName": "The name of this integration.", + "IntegrationType": "The type of integration. Integrations with OpenSearch Service have the type `OPENSEARCH` .", + "ResourceConfig": "This structure contains configuration details about an integration between CloudWatch Logs and another entity." + }, + "AWS::Logs::Integration OpenSearchResourceConfig": { + "ApplicationARN": "If you want to use an existing OpenSearch Service application for your integration with OpenSearch Service, specify it here. If you omit this, a new application will be created.", + "DashboardViewerPrincipals": "Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.\n\n> In addition to specifying these users here, you must also grant them the *CloudWatchOpenSearchDashboardsAccess* IAM policy. For more information, see [IAM policies for users](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/OpenSearch-Dashboards-UserRoles.html) .", + "DataSourceRoleArn": "Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. 
For more information about the permissions needed, see [Permissions that the integration needs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/OpenSearch-Dashboards-CreateRole.html) in the CloudWatch Logs User Guide.",
+ "KmsKeyArn": "To have the vended dashboard data encrypted with AWS KMS instead of the CloudWatch Logs default encryption method, specify the ARN of the AWS KMS key that you want to use.",
+ "RetentionDays": "Specify how many days you want the data derived by OpenSearch Service to be retained in the index that the dashboard refers to. This also sets the maximum time period that you can choose when viewing data in the dashboard. Choosing a longer time frame will incur additional costs."
+ },
+ "AWS::Logs::Integration ResourceConfig": {
+ "OpenSearchResourceConfig": "This structure contains configuration details about an integration between CloudWatch Logs and OpenSearch Service."
+ },
 "AWS::Logs::LogAnomalyDetector": {
 "AccountId": "The ID of the account to create the anomaly detector in.",
 "AnomalyVisibilityTime": "The number of days to have visibility on an anomaly. After this time period has elapsed for an anomaly, it will be automatically baselined and the anomaly detector will treat new occurrences of a similar anomaly as normal. Therefore, if you do not correct the cause of an anomaly during the time period specified in `AnomalyVisibilityTime` , it will be considered normal going forward and will not be detected as an anomaly.",
@@ -25737,6 +26053,7 @@
 },
 "AWS::Logs::LogGroup": {
 "DataProtectionPolicy": "Creates a data protection policy and assigns it to the log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data. When a user who does not have permission to view masked data views a log event that includes masked data, the sensitive data is replaced by asterisks.\n\nFor more information, including a list of types of data that can be audited and masked, see [Protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html) .",
+ "FieldIndexPolicies": "Creates or updates a *field index policy* for the specified log group. Only log groups in the Standard log class support field index policies. For more information about log classes, see [Log classes](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch_Logs_Log_Classes.html) .\n\nYou can use field index policies to create *field indexes* on fields found in log events in the log group. Creating field indexes lowers the costs for CloudWatch Logs Insights queries that reference those field indexes, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields that have high cardinality of values. Common examples of indexes include request ID, session ID, user ID, and instance IDs. For more information, see [Create field indexes to improve query performance and reduce costs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Field-Indexing.html) .\n\nCurrently, this array supports only one field index policy object.",
 "KmsKeyId": "The Amazon Resource Name (ARN) of the AWS KMS key to use when encrypting log data.\n\nTo associate an AWS KMS key with the log group, specify the ARN of that KMS key here. If you do so, ingested data is encrypted using this key.
This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs . This enables CloudWatch Logs to decrypt this data whenever it is requested.\n\nIf you attempt to associate a KMS key with the log group but the KMS key doesn't exist or is deactivated, you will receive an `InvalidParameterException` error.\n\nLog group data is always encrypted in CloudWatch Logs . If you omit this key, the encryption does not use AWS KMS . For more information, see [Encrypt log data in CloudWatch Logs using AWS Key Management Service](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html)", "LogGroupClass": "Specifies the log group class for this log group. There are two classes:\n\n- The `Standard` log class supports all CloudWatch Logs features.\n- The `Infrequent Access` log class supports a subset of CloudWatch Logs features and incurs lower costs.\n\nFor details about the features supported by each class, see [Log classes](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch_Logs_Log_Classes.html)", "LogGroupName": "The name of the log group. If you don't specify a name, AWS CloudFormation generates a unique ID for the log group.", @@ -25745,13 +26062,14 @@ }, "AWS::Logs::LogGroup Tag": { "Key": "", - "Value": "" + "Value": "The value of this key-value pair." }, "AWS::Logs::LogStream": { "LogGroupName": "The name of the log group where the log stream is created.", "LogStreamName": "The name of the log stream. The name must be unique within the log group." }, "AWS::Logs::MetricFilter": { + "ApplyOnTransformedLogs": "This parameter is valid only for log groups that have an active log transformer. For more information about log transformers, see [PutTransformer](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutTransformer.html) .\n\nIf this value is `true` , the metric filter is applied on the transformed version of the log events instead of the original ingested log events.", "FilterName": "The name of the metric filter.", "FilterPattern": "A filter pattern for extracting metric data out of ingested log events. For more information, see [Filter and Pattern Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html) .", "LogGroupName": "The name of an existing log group that you want to associate with this metric filter.", @@ -25772,6 +26090,7 @@ "AWS::Logs::QueryDefinition": { "LogGroupNames": "Use this parameter if you want the query to query only certain log groups.", "Name": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `*folder-name* / *query-name*` .", + "QueryLanguage": "The query language used for this query. For more information about the query languages that CloudWatch Logs supports, see [Supported query languages](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData_Languages.html) .", "QueryString": "The query string to use for this query definition. For more information, see [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) ." }, "AWS::Logs::ResourcePolicy": { @@ -25779,6 +26098,7 @@ "PolicyName": "The name of the resource policy." }, "AWS::Logs::SubscriptionFilter": { + "ApplyOnTransformedLogs": "This parameter is valid only for log groups that have an active log transformer. 
For more information about log transformers, see [PutTransformer](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutTransformer.html) .\n\nIf this value is `true` , the subscription filter is applied on the transformed version of the log events instead of the original ingested log events.",
 "DestinationArn": "The Amazon Resource Name (ARN) of the destination.",
 "Distribution": "The method used to distribute log data to the destination, which can be either random or grouped by log stream.",
 "FilterName": "The name of the subscription filter.",
@@ -25786,6 +26106,155 @@
 "LogGroupName": "The log group to associate with the subscription filter. All log events that are uploaded to this log group are filtered and delivered to the specified AWS resource if the filter pattern matches the log events.",
 "RoleArn": "The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery."
 },
+ "AWS::Logs::Transformer": {
+ "LogGroupIdentifier": "Specify either the name or ARN of the log group to create the transformer for.",
+ "TransformerConfig": "This structure is an array that contains the configuration of this log transformer. A log transformer is an array of processors, where each processor applies one type of transformation to the log events that are ingested."
+ },
+ "AWS::Logs::Transformer AddKeyEntry": {
+ "Key": "The key of the new entry to be added to the log event.",
+ "OverwriteIfExists": "Specifies whether to overwrite the value if the key already exists in the log event. If you omit this, the default is `false` .",
+ "Value": "The value of the new entry to be added to the log event."
+ },
+ "AWS::Logs::Transformer AddKeys": {
+ "Entries": "An array of objects, where each object contains the information about one key to add to the log event."
+ },
+ "AWS::Logs::Transformer CopyValue": {
+ "Entries": "An array of `CopyValueEntry` objects, where each object contains the information about one field value to copy."
+ },
+ "AWS::Logs::Transformer CopyValueEntry": {
+ "OverwriteIfExists": "Specifies whether to overwrite the value if the destination key already exists. If you omit this, the default is `false` .",
+ "Source": "The key to copy.",
+ "Target": "The key of the field to copy the value to."
+ },
+ "AWS::Logs::Transformer Csv": {
+ "Columns": "An array of names to use for the columns in the transformed log event.\n\nIf you omit this, default column names ( `[column_1, column_2 ...]` ) are used.",
+ "Delimiter": "The character used to separate each column in the original comma-separated value log event. If you omit this, the processor looks for the comma `,` character as the delimiter.",
+ "QuoteCharacter": "The character used as a text qualifier for a single column of data. If you omit this, the double quotation mark `\"` character is used.",
+ "Source": "The path to the field in the log event that has the comma separated values to be parsed. If you omit this value, the whole log message is processed."
+ },
+ "AWS::Logs::Transformer DateTimeConverter": {
+ "Locale": "The locale of the source field. If you omit this, the default of `locale.ROOT` is used.",
+ "MatchPatterns": "A list of patterns to match against the `source` field.",
+ "Source": "The key to apply the date conversion to.",
+ "SourceTimezone": "The time zone of the source field.
If you omit this, the default used is the UTC zone.",
+ "Target": "The JSON field to store the result in.",
+ "TargetFormat": "The datetime format to use for the converted data in the target field.\n\nIf you omit this, the default of `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'` is used.",
+ "TargetTimezone": "The time zone of the target field. If you omit this, the default used is the UTC zone."
+ },
+ "AWS::Logs::Transformer DeleteKeys": {
+ "WithKeys": "The list of keys to delete."
+ },
+ "AWS::Logs::Transformer Grok": {
+ "Match": "The grok pattern to match against the log event. For a list of supported grok patterns, see [Supported grok patterns](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#Grok-Patterns) .",
+ "Source": "The path to the field in the log event that you want to parse. If you omit this value, the whole log message is parsed."
+ },
+ "AWS::Logs::Transformer ListToMap": {
+ "Flatten": "A Boolean value to indicate whether the list will be flattened into single items. Specify `true` to flatten the list. The default is `false` .",
+ "FlattenedElement": "If you set `flatten` to `true` , use `flattenedElement` to specify which element, `first` or `last` , to keep.\n\nYou must specify this parameter if `flatten` is `true` .",
+ "Key": "The key of the field to be extracted as keys in the generated map.",
+ "Source": "The key in the log event that has a list of objects that will be converted to a map.",
+ "Target": "The key of the field that will hold the generated map.",
+ "ValueKey": "If this is specified, the values that you specify in this parameter will be extracted from the `source` objects and put into the values of the generated map. Otherwise, original objects in the source list will be put into the values of the generated map."
+ },
+ "AWS::Logs::Transformer LowerCaseString": {
+ "WithKeys": "The array containing the keys of the fields to convert to lowercase."
+ },
+ "AWS::Logs::Transformer MoveKeyEntry": {
+ "OverwriteIfExists": "Specifies whether to overwrite the value if the destination key already exists. If you omit this, the default is `false` .",
+ "Source": "The key to move.",
+ "Target": "The key to move to."
+ },
+ "AWS::Logs::Transformer MoveKeys": {
+ "Entries": "An array of objects, where each object contains the information about one key to move."
+ },
+ "AWS::Logs::Transformer ParseCloudfront": {
+ "Source": "Omit this parameter and the whole log message will be processed by this processor. No other value than `@message` is allowed for `source` ."
+ },
+ "AWS::Logs::Transformer ParseJSON": {
+ "Destination": "The location to put the parsed key value pair into. If you omit this parameter, it is placed under the root node.",
+ "Source": "Path to the field in the log event that will be parsed. Use dot notation to access child fields. For example, `store.book` ."
+ },
+ "AWS::Logs::Transformer ParseKeyValue": {
+ "Destination": "The destination field to put the extracted key-value pairs into.",
+ "FieldDelimiter": "The field delimiter string that is used between key-value pairs in the original log events.
If you omit this, the ampersand `&` character is used.", + "KeyPrefix": "If you want to add a prefix to all transformed keys, specify it here.", + "KeyValueDelimiter": "The delimiter string to use between the key and value in each pair in the transformed log event.\n\nIf you omit this, the equal `=` character is used.", + "NonMatchValue": "A value to insert into the value field in the result, when a key-value pair is not successfully split.", + "OverwriteIfExists": "Specifies whether to overwrite the value if the destination key already exists. If you omit this, the default is `false` .", + "Source": "Path to the field in the log event that will be parsed. Use dot notation to access child fields. For example, `store.book`" + }, + "AWS::Logs::Transformer ParsePostgres": { + "Source": "Omit this parameter and the whole log message will be processed by this processor. No other value than `@message` is allowed for `source` ." + }, + "AWS::Logs::Transformer ParseRoute53": { + "Source": "Omit this parameter and the whole log message will be processed by this processor. No other value than `@message` is allowed for `source` ." + }, + "AWS::Logs::Transformer ParseVPC": { + "Source": "Omit this parameter and the whole log message will be processed by this processor. No other value than `@message` is allowed for `source` ." + }, + "AWS::Logs::Transformer ParseWAF": { + "Source": "Omit this parameter and the whole log message will be processed by this processor. No other value than `@message` is allowed for `source` ." + }, + "AWS::Logs::Transformer Processor": { + "AddKeys": "Use this parameter to include the [addKeys](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-addKeys) processor in your transformer.", + "CopyValue": "Use this parameter to include the [copyValue](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-copyValue) processor in your transformer.", + "Csv": "Use this parameter to include the [CSV](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-CSV) processor in your transformer.", + "DateTimeConverter": "Use this parameter to include the [datetimeConverter](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-datetimeConverter) processor in your transformer.", + "DeleteKeys": "Use this parameter to include the [deleteKeys](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-deleteKeys) processor in your transformer.", + "Grok": "Use this parameter to include the [grok](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-grok) processor in your transformer.", + "ListToMap": "Use this parameter to include the [listToMap](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-listToMap) processor in your transformer.", + "LowerCaseString": "Use this parameter to include the [lowerCaseString](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-lowerCaseString) processor in your transformer.", + "MoveKeys": "Use this parameter to include the 
[moveKeys](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-moveKeys) processor in your transformer.", + "ParseCloudfront": "Use this parameter to include the [parseCloudfront](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parseCloudfront) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", + "ParseJSON": "Use this parameter to include the [parseJSON](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parseJSON) processor in your transformer.", + "ParseKeyValue": "Use this parameter to include the [parseKeyValue](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parseKeyValue) processor in your transformer.", + "ParsePostgres": "Use this parameter to include the [parsePostGres](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parsePostGres) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", + "ParseRoute53": "Use this parameter to include the [parseRoute53](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parseRoute53) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", + "ParseVPC": "Use this parameter to include the [parseVPC](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parseVPC) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", + "ParseWAF": "Use this parameter to include the [parseWAF](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parseWAF) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", + "RenameKeys": "Use this parameter to include the [renameKeys](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-renameKeys) processor in your transformer.", + "SplitString": "Use this parameter to include the [splitString](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-splitString) processor in your transformer.", + "SubstituteString": "Use this parameter to include the [substituteString](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-substituteString) processor in your transformer.", + "TrimString": "Use this parameter to include the [trimString](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-trimString) processor in your transformer.", + "TypeConverter": "Use this parameter to include the [typeConverter](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-typeConverter) processor in your transformer.", + "UpperCaseString": "Use this parameter to include the 
[upperCaseString](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-upperCaseString) processor in your transformer." + }, + "AWS::Logs::Transformer RenameKeyEntry": { + "Key": "The key to rename.", + "OverwriteIfExists": "Specifies whether to overwrite the existing value if the destination key already exists. The default is `false` .", + "RenameTo": "The string to use for the new key name." + }, + "AWS::Logs::Transformer RenameKeys": { + "Entries": "An array of `RenameKeyEntry` objects, where each object contains the information about a single key to rename." + }, + "AWS::Logs::Transformer SplitString": { + "Entries": "An array of `SplitStringEntry` objects, where each object contains the information about one field to split." + }, + "AWS::Logs::Transformer SplitStringEntry": { + "Delimiter": "The separator characters to split the string entry on.", + "Source": "The key of the field to split." + }, + "AWS::Logs::Transformer SubstituteString": { + "Entries": "An array of objects, where each object contains the information about one key to match and replace." + }, + "AWS::Logs::Transformer SubstituteStringEntry": { + "From": "The regular expression string to be replaced. Special regex characters such as [ and ] must be escaped using \\\\ when using double quotes and with \\ when using single quotes. For more information, see [Class Pattern](https://docs.aws.amazon.com/https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/regex/Pattern.html) on the Oracle web site.", + "Source": "The key to modify.", + "To": "The string to be substituted for each match of `from` ." + }, + "AWS::Logs::Transformer TrimString": { + "WithKeys": "The array containing the keys of the fields to trim." + }, + "AWS::Logs::Transformer TypeConverter": { + "Entries": "An array of `TypeConverterEntry` objects, where each object contains the information about one field to change the type of." + }, + "AWS::Logs::Transformer TypeConverterEntry": { + "Key": "The key with the value that is to be converted to a different type.", + "Type": "The type to convert the field value to. Valid values are `integer` , `double` , `string` , and `boolean` ." + }, + "AWS::Logs::Transformer UpperCaseString": { + "WithKeys": "The array containing the keys of the fields to convert to uppercase." + },
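As a concrete illustration of how these processor entries compose, here is a minimal, hypothetical sketch of an AWS::Logs::Transformer resource in a CloudFormation template. The top-level property names (`LogGroupIdentifier`, `TransformerConfig`) are assumptions based on the CloudWatch Logs transformer documentation, not on this schema excerpt; the processor shapes follow the entries above.

    {
      "Resources": {
        "DemoTransformer": {
          "Type": "AWS::Logs::Transformer",
          "Properties": {
            "LogGroupIdentifier": "my-log-group",
            "TransformerConfig": [
              { "ParseJSON": { "Source": "@message" } },
              { "RenameKeys": { "Entries": [ { "Key": "lvl", "RenameTo": "level", "OverwriteIfExists": false } ] } },
              { "LowerCaseString": { "WithKeys": [ "level" ] } }
            ]
          }
        }
      }
    }

Note that, per the descriptions above, the parseCloudfront, parsePostGres, parseRoute53, parseVPC, and parseWAF processors must each be the first processor in the transformer when they are used.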
"AWS::LookoutEquipment::InferenceScheduler": { "DataDelayOffsetInMinutes": "A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if an offset delay time of five minutes was selected, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.", "DataInputConfiguration": "Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location.", @@ -25960,6 +26429,7 @@ "InstanceType": "The instance type of the runtime environment.", "KmsKeyId": "The identifier of a customer managed key.", "Name": "The name of the runtime environment.", + "NetworkType": "The network type supported by the runtime environment.", "PreferredMaintenanceWindow": "Configures the maintenance window that you want for the runtime environment. The maintenance window must have the format `ddd:hh24:mi-ddd:hh24:mi` and must be less than 24 hours. The following two examples are valid maintenance windows: `sun:23:45-mon:00:15` or `sat:01:00-sat:03:00` .\n\nIf you do not provide a value, a random system-generated value will be assigned.", "PubliclyAccessible": "Specifies whether the runtime environment is publicly accessible.", "SecurityGroupIds": "The list of security groups for the VPC associated with this runtime environment.", @@ -26396,6 +26866,7 @@ }, "AWS::MediaConnect::Bridge BridgeNetworkSource": { "MulticastIp": "The network source multicast IP.", + "MulticastSourceSettings": "The settings related to the multicast source.", "Name": "The name of the network source. This name is used to reference the source and must be unique among sources in this bridge.", "NetworkName": "The network source's gateway network name.", "Port": "The network source port.", @@ -26420,6 +26891,9 @@ "MaxBitrate": "The maximum expected bitrate (in bps) of the ingress bridge.", "MaxOutputs": "The maximum number of outputs on the ingress bridge." }, + "AWS::MediaConnect::Bridge MulticastSourceSettings": { + "MulticastSourceIp": "The IP address of the source for source-specific multicast (SSM)." + }, "AWS::MediaConnect::Bridge SourcePriority": { "PrimarySource": "The name of the source you choose as the primary source for this flow." }, @@ -26450,10 +26924,14 @@ }, "AWS::MediaConnect::BridgeSource BridgeNetworkSource": { "MulticastIp": "The network source multicast IP.", + "MulticastSourceSettings": "The settings related to the multicast source.", "NetworkName": "The network source's gateway network name.", "Port": "The network source port.", "Protocol": "The network source protocol." }, + "AWS::MediaConnect::BridgeSource MulticastSourceSettings": { + "MulticastSourceIp": "The IP address of the source for source-specific multicast (SSM)." + }, "AWS::MediaConnect::BridgeSource VpcInterfaceAttachment": { "VpcInterfaceName": "The name of the VPC interface that you want to send your output to." }, @@ -26722,6 +27200,7 @@ "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::MediaLive::Channel": { + "AnywhereSettings": "", "CdiInputSpecification": "Specification of CDI inputs for this channel.", "ChannelClass": "The class for this channel. For a channel with two pipelines, the class is STANDARD. 
For a channel with one pipeline, the class is SINGLE_PIPELINE.", "Destinations": "The settings that identify the destination for the outputs in this MediaLive output package.", @@ -26759,6 +27238,10 @@ "AWS::MediaLive::Channel AncillarySourceSettings": { "SourceAncillaryChannelNumber": "Specifies the number (1 to 4) of the captions channel you want to extract from the ancillary captions. If you plan to convert the ancillary captions to another format, complete this field. If you plan to choose Embedded as the captions destination in the output (to pass through all the channels in the ancillary captions), leave this field blank because MediaLive ignores the field." }, + "AWS::MediaLive::Channel AnywhereSettings": { + "ChannelPlacementGroupId": "", + "ClusterId": "" + }, "AWS::MediaLive::Channel ArchiveCdnSettings": { "ArchiveS3Settings": "Sets up Amazon S3 as the destination for this Archive output." }, @@ -26862,6 +27345,31 @@ "InputPreference": "Input preference when deciding which input to make active when a previously failed input has recovered.", "SecondaryInputId": "The input ID of the secondary input in the automatic input failover pair." }, + "AWS::MediaLive::Channel Av1ColorSpaceSettings": { + "ColorSpacePassthroughSettings": "", + "Hdr10Settings": "", + "Rec601Settings": "", + "Rec709Settings": "" + }, + "AWS::MediaLive::Channel Av1Settings": { + "AfdSignaling": "", + "BufSize": "", + "ColorSpaceSettings": "", + "FixedAfd": "", + "FramerateDenominator": "", + "FramerateNumerator": "", + "GopSize": "", + "GopSizeUnits": "", + "Level": "", + "LookAheadRateControl": "", + "MaxBitrate": "", + "MinIInterval": "", + "ParDenominator": "", + "ParNumerator": "", + "QvbrQualityLevel": "", + "SceneChangeDetect": "", + "TimecodeBurninSettings": "" + }, "AWS::MediaLive::Channel AvailBlanking": { "AvailBlankingImage": "The blanking image to be used. Keep empty for solid black. Only .bmp and .png images are supported.", "State": "When set to enabled, the video, audio, and captions are blanked when insertion metadata is added." @@ -26875,6 +27383,10 @@ "Scte35SpliceInsert": "The setup for SCTE-35 splice insert handling.", "Scte35TimeSignalApos": "The setup for SCTE-35 time signal APOS handling." }, + "AWS::MediaLive::Channel BandwidthReductionFilterSettings": { + "PostFilterSharpening": "", + "Strength": "" + }, "AWS::MediaLive::Channel BlackoutSlate": { "BlackoutSlateImage": "The blackout slate image to be used. Keep empty for solid black. Only .bmp and .png images are supported.", "NetworkEndBlackout": "Setting to enabled causes MediaLive to blackout the video, audio, and captions, and raise the \"Network Blackout Image\" slate when an SCTE104/35 Network End Segmentation Descriptor is encountered. The blackout is lifted when the Network Start Segmentation Descriptor is encountered. The Network End and Network Start descriptors must contain a network ID that matches the value entered in Network ID.", @@ -27136,6 +27648,7 @@ "Rec709Settings": "Settings to configure the handling of Rec709 color space." }, "AWS::MediaLive::Channel H264FilterSettings": { + "BandwidthReductionFilterSettings": "", "TemporalFilterSettings": "Settings for applying the temporal filter to the video." }, "AWS::MediaLive::Channel H264Settings": { @@ -27191,6 +27704,7 @@ "Rec709Settings": "Settings to configure the handling of Rec709 color space." }, "AWS::MediaLive::Channel H265FilterSettings": { + "BandwidthReductionFilterSettings": "", "TemporalFilterSettings": "Settings for applying the temporal filter to the video." 
}, "AWS::MediaLive::Channel H265Settings": { @@ -27343,7 +27857,8 @@ "AutomaticInputFailoverSettings": "Settings to implement automatic input failover in this input.", "InputAttachmentName": "A name for the attachment. This is required if you want to use this input in an input switch action.", "InputId": "The ID of the input to attach.", - "InputSettings": "Information about the content to extract from the input and about the general handling of the content." + "InputSettings": "Information about the content to extract from the input and about the general handling of the content.", + "LogicalInterfaceNames": "" }, "AWS::MediaLive::Channel InputChannelLevel": { "Gain": "The remixing value. Units are in dB, and acceptable values are within the range from -60 (mute) to 6 dB.", @@ -27525,7 +28040,30 @@ "H265PackagingType": "Only applicable when this output is referencing an H.265 video description.\nSpecifies whether MP4 segments should be packaged as HEV1 or HVC1.", "NameModifier": "A string that is concatenated to the end of the destination file name. This is required for multiple outputs of the same type." }, + "AWS::MediaLive::Channel MulticastInputSettings": { + "SourceIpAddress": "" + }, + "AWS::MediaLive::Channel MultiplexContainerSettings": { + "MultiplexM2tsSettings": "" + }, + "AWS::MediaLive::Channel MultiplexM2tsSettings": { + "AbsentInputAudioBehavior": "", + "Arib": "", + "AudioBufferModel": "", + "AudioFramesPerPes": "", + "AudioStreamType": "", + "CcDescriptor": "", + "Ebif": "", + "EsRateInPes": "", + "Klv": "", + "NielsenId3Behavior": "", + "PcrControl": "", + "PcrPeriod": "", + "Scte35Control": "", + "Scte35PrerollPullupMilliseconds": "" + }, "AWS::MediaLive::Channel MultiplexOutputSettings": { + "ContainerSettings": "", "Destination": "Destination is a Multiplex." }, "AWS::MediaLive::Channel MultiplexProgramChannelDestinationSettings": { @@ -27534,6 +28072,7 @@ }, "AWS::MediaLive::Channel NetworkInputSettings": { "HlsInputSettings": "Information about how to connect to the upstream system.", + "MulticastInputSettings": "", "ServerValidation": "Checks HTTPS server certificates. When set to checkCryptographyOnly, cryptography in the certificate is checked, but not the server's name. Certain subdomains (notably S3 buckets that use dots in the bucket name) don't strictly match the corresponding certificate's wildcard pattern and would otherwise cause the channel to error. This setting is ignored for protocols that do not use HTTPS." }, "AWS::MediaLive::Channel NielsenCBET": { @@ -27566,7 +28105,8 @@ "Id": "The ID for this destination.", "MediaPackageSettings": "The destination settings for a MediaPackage output.", "MultiplexSettings": "Destination settings for a Multiplex output; one destination for both encoders.", - "Settings": "The destination settings for an output." + "Settings": "The destination settings for an output.", + "SrtSettings": "" }, "AWS::MediaLive::Channel OutputDestinationSettings": { "PasswordParam": "The password parameter that holds the password for accessing the downstream system. This password parameter applies only if the downstream system requires credentials.", @@ -27588,6 +28128,7 @@ "MsSmoothGroupSettings": "The configuration of a Microsoft Smooth output group.", "MultiplexGroupSettings": "The settings for a Multiplex output group.", "RtmpGroupSettings": "The configuration of an RTMP output group.", + "SrtGroupSettings": "", "UdpGroupSettings": "The configuration of a UDP output group." 
}, "AWS::MediaLive::Channel OutputLocationRef": { @@ -27606,6 +28147,7 @@ "MsSmoothOutputSettings": "The settings for a Microsoft Smooth output.", "MultiplexOutputSettings": "Configuration of a Multiplex output.", "RtmpOutputSettings": "The settings for an RTMP output.\n\nThe parent of this entity is OutputGroupSettings.", + "SrtOutputSettings": "", "UdpOutputSettings": "The settings for a UDP output.\n\nThe parent of this entity is OutputGroupSettings." }, "AWS::MediaLive::Channel RemixSettings": { @@ -27647,6 +28189,21 @@ "NoRegionalBlackoutFlag": "When set to ignore, segment descriptors with noRegionalBlackoutFlag set to 0 no longer trigger blackouts or ad avail slates.", "WebDeliveryAllowedFlag": "When set to ignore, segment descriptors with webDeliveryAllowedFlag set to 0 no longer trigger blackouts or ad avail slates." }, + "AWS::MediaLive::Channel SrtGroupSettings": { + "InputLossAction": "" + }, + "AWS::MediaLive::Channel SrtOutputDestinationSettings": { + "EncryptionPassphraseSecretArn": "", + "StreamId": "", + "Url": "" + }, + "AWS::MediaLive::Channel SrtOutputSettings": { + "BufferMsec": "", + "ContainerSettings": "", + "Destination": "", + "EncryptionType": "", + "Latency": "" + }, "AWS::MediaLive::Channel StandardHlsSettings": { "AudioRenditionSets": "Lists all the audio groups that are used with the video output stream. This inputs all the audio GROUP-IDs that are associated with the video, separated by a comma (,).", "M3u8Settings": "Settings for the M3U8 container." @@ -27697,6 +28254,7 @@ "VideoBlackThresholdMsec": "The amount of time (in milliseconds) that the active input must be black before automatic input failover occurs." }, "AWS::MediaLive::Channel VideoCodecSettings": { + "Av1Settings": "", "FrameCaptureSettings": "The settings for the video codec in a frame capture output.", "H264Settings": "The settings for the H.264 codec in the output.", "H265Settings": "Settings for video encoded with the H265 codec.", @@ -27811,8 +28369,10 @@ "AWS::MediaLive::Input": { "Destinations": "Settings that apply only if the input is a push type of input.", "InputDevices": "Settings that apply only if the input is an Elemental Link input.", + "InputNetworkLocation": "", "InputSecurityGroups": "The list of input security groups (referenced by IDs) to attach to the input if the input is a push type.", "MediaConnectFlows": "Settings that apply only if the input is a MediaConnect input.", + "MulticastSettings": "", "Name": "A name for the input.", "RoleArn": "The IAM role for MediaLive to assume when creating a MediaConnect input or Amazon VPC input. This doesn't apply to other types of inputs. The role is identified by its ARN.", "Sources": "Settings that apply only if the input is a pull type of input.", @@ -27822,11 +28382,18 @@ "Vpc": "Settings that apply only if the input is an push input where the source is on Amazon VPC." }, "AWS::MediaLive::Input InputDestinationRequest": { + "Network": "", + "NetworkRoutes": "", + "StaticIpAddress": "", "StreamName": "The stream name (application name/application instance) for the location the RTMP source content will be pushed to in MediaLive." }, "AWS::MediaLive::Input InputDeviceSettings": { "Id": "The unique ID for the device." }, + "AWS::MediaLive::Input InputRequestDestinationRoute": { + "Cidr": "", + "Gateway": "" + }, "AWS::MediaLive::Input InputSourceRequest": { "PasswordParam": "The password parameter that holds the password for accessing the upstream system. 
The password parameter applies only if the upstream system requires credentials.", "Url": "For a pull input, the URL where MediaLive pulls the source content from.", @@ -27839,6 +28406,13 @@ "AWS::MediaLive::Input MediaConnectFlowRequest": { "FlowArn": "The ARN of one or two MediaConnect flows that are the sources for this MediaConnect input." }, + "AWS::MediaLive::Input MulticastSettingsCreateRequest": { + "Sources": "" + }, + "AWS::MediaLive::Input MulticastSourceCreateRequest": { + "SourceIp": "", + "Url": "" + }, "AWS::MediaLive::Input SrtCallerDecryptionRequest": { "Algorithm": "", "PassphraseSecretArn": "" }, @@ -28593,11 +29167,12 @@ "ClusterName": "The name of the cluster .", "DataTiering": "Enables data tiering. Data tiering is only supported for clusters using the r6gd node type. This parameter must be set when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html) .", "Description": "A description of the cluster .", - "Engine": "The Valkey or Redis OSS engine used by the cluster.", - "EngineVersion": "The Valkey or Redis OSS engine version used by the cluster .", + "Engine": "The name of the engine used by the cluster.", + "EngineVersion": "The Redis engine version used by the cluster .", "FinalSnapshotName": "The user-supplied name of a final cluster snapshot. This is the unique name that identifies the snapshot. MemoryDB creates the snapshot, and then deletes the cluster immediately afterward.", "KmsKeyId": "The ID of the KMS key used to encrypt the cluster .", "MaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\n*Pattern* : `ddd:hh24:mi-ddd:hh24:mi`", + "MultiRegionClusterName": "The name of the multi-Region cluster that this cluster belongs to.", "NodeType": "The cluster 's node type.", "NumReplicasPerShard": "The number of replicas to apply to each shard.\n\n*Default value* : `1`\n\n*Maximum value* : `5`", "NumShards": "The number of shards in the cluster .", @@ -28622,6 +29197,22 @@ "Key": "The key for the tag. May not be null.", "Value": "The tag's value. May be null." }, + "AWS::MemoryDB::MultiRegionCluster": { + "Description": "The description of the multi-Region cluster.", + "Engine": "The name of the engine used by the multi-Region cluster.", + "EngineVersion": "The version of the engine used by the multi-Region cluster.", + "MultiRegionClusterNameSuffix": "A suffix to be added to the multi-Region cluster name. Amazon MemoryDB automatically applies a prefix to the multi-Region cluster name when it is created. Each Amazon Region has its own prefix. For instance, a multi-Region cluster name created in the US-West-1 Region will begin with \"virxk\", along with the suffix name you provide. The suffix guarantees uniqueness of the multi-Region cluster name across multiple Regions.", + "MultiRegionParameterGroupName": "The name of the multi-Region parameter group associated with the cluster.", + "NodeType": "The node type used by the multi-Region cluster.", + "NumShards": "The number of shards in the multi-Region cluster.", + "TLSEnabled": "Indicates whether the multi-Region cluster is TLS enabled.", + "Tags": "A list of tags to be applied to the multi-Region cluster.", + "UpdateStrategy": "The strategy to use for the update operation. Supported values are \"coordinated\" or \"uncoordinated\"." 
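To make the new MultiRegionCluster entries concrete, here is a minimal, hypothetical template sketch. The node type, shard count, and suffix are illustrative placeholders, not values confirmed by this schema excerpt.

    {
      "DemoGlobalStore": {
        "Type": "AWS::MemoryDB::MultiRegionCluster",
        "Properties": {
          "MultiRegionClusterNameSuffix": "demo",
          "Description": "Multi-Region cluster for the demo workload",
          "NodeType": "db.r7g.xlarge",
          "NumShards": 2,
          "TLSEnabled": true,
          "UpdateStrategy": "coordinated"
        }
      }
    }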
+ }, + "AWS::MemoryDB::MultiRegionCluster Tag": { + "Key": "The key for the tag. May not be null.", + "Value": "The tag's value. May be null." + }, "AWS::MemoryDB::ParameterGroup": { "Description": "A description of the parameter group.", "Family": "The name of the parameter group family that this parameter group is compatible with.", @@ -29132,6 +29723,28 @@ "Key": "The tag key.\n\nConstraints: Maximum length of 128 characters.", "Value": "The tag value.\n\nConstraints: Maximum length of 256 characters." }, + "AWS::NetworkManager::DirectConnectGatewayAttachment": { + "CoreNetworkId": "", + "DirectConnectGatewayArn": "The ARN of the Direct Connect gateway.", + "EdgeLocations": "", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", + "ProposedSegmentChange": "Describes a proposed segment change. In some cases, the segment change must first be evaluated and accepted.", + "Tags": "" + }, + "AWS::NetworkManager::DirectConnectGatewayAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, + "AWS::NetworkManager::DirectConnectGatewayAttachment ProposedSegmentChange": { + "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", + "SegmentName": "The name of the segment to change.", + "Tags": "The list of key-value tags that changed for the segment." + }, + "AWS::NetworkManager::DirectConnectGatewayAttachment Tag": { + "Key": "The tag key.\n\nConstraints: Maximum length of 128 characters.", + "Value": "The tag value.\n\nConstraints: Maximum length of 256 characters." + }, "AWS::NetworkManager::GlobalNetwork": { "CreatedAt": "The date and time that the global network was created.", "Description": "A description of the global network.\n\nConstraints: Maximum length of 256 characters.", @@ -29642,6 +30255,7 @@ "InstanceCount": "The number of data nodes (instances) to use in the OpenSearch Service domain.", "InstanceType": "The instance type for your data nodes, such as `m3.medium.search` . For valid values, see [Supported instance types in Amazon OpenSearch Service](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/supported-instance-types.html) .", "MultiAZWithStandbyEnabled": "Indicates whether Multi-AZ with Standby deployment option is enabled. For more information, see [Multi-AZ with Standby](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/managedomains-multiaz.html#managedomains-za-standby) .", + "NodeOptions": "List of node options for the domain.", "WarmCount": "The number of warm nodes in the cluster.", "WarmEnabled": "Whether to enable UltraWarm storage for the cluster. See [UltraWarm storage for Amazon OpenSearch Service](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ultrawarm.html) .", "WarmType": "The instance type for the cluster's warm nodes.", @@ -29702,6 +30316,15 @@ "MasterUserName": "Username for the master user. 
Only specify if `InternalUserDatabaseEnabled` is true in [AdvancedSecurityOptionsInput](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-advancedsecurityoptionsinput.html) .\n\nIf you don't want to specify this value directly within the template, you can use a [dynamic reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html) instead.", "MasterUserPassword": "Password for the master user. Only specify if `InternalUserDatabaseEnabled` is true in [AdvancedSecurityOptionsInput](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-advancedsecurityoptionsinput.html) .\n\nIf you don't want to specify this value directly within the template, you can use a [dynamic reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html) instead." }, + "AWS::OpenSearchService::Domain NodeConfig": { + "Count": "The number of nodes of a particular node type in the cluster.", + "Enabled": "A Boolean that indicates whether a particular node type is enabled.", + "Type": "The instance type of a particular node type in the cluster." + }, + "AWS::OpenSearchService::Domain NodeOption": { + "NodeConfig": "Container for specifying configuration of any node type.", + "NodeType": "Container for the node type, such as coordinating." + }, "AWS::OpenSearchService::Domain NodeToNodeEncryptionOptions": { "Enabled": "Specifies to enable or disable node-to-node encryption on the domain. Required if you enable fine-grained access control in [AdvancedSecurityOptionsInput](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-advancedsecurityoptionsinput.html) ." }, @@ -30022,7 +30645,7 @@ "Value": "The string value that's associated with the key of the tag. You can set the value of a tag to an empty string, but you can't set the value of a tag to null." }, "AWS::Organizations::Policy": { - "Content": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", + "Content": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. 
The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Resource control policies: 5,120 characters\n- Declarative policies: 10,000 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", "Description": "Human readable description of the policy.", "Name": "Name of the policy.\n\nThe [regex pattern](https://docs.aws.amazon.com/http://wikipedia.org/wiki/regex) that is used to validate this parameter is a string of any of the characters in the ASCII character range.", "Tags": "A list of tags that you want to attach to the newly created policy. For each tag in the list, you must specify both a tag key and a value. You can set the value to an empty string, but you can't set it to `null` . For more information about tagging, see [Tagging AWS Organizations resources](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html) in the AWS Organizations User Guide.\n\n> If any one of the tags is not valid or if you exceed the allowed number of tags for a policy, then the entire request fails and the policy is not created.", @@ -30288,6 +30911,97 @@ "Issuer": "The issuer value to copy into your Microsoft Entra app registration's OIDC.", "Subject": "The subject value to copy into your Microsoft Entra app registration's OIDC." }, + "AWS::PCS::Cluster": { + "Name": "The name that identifies the cluster.", + "Networking": "The networking configuration for the cluster's control plane.", + "Scheduler": "The cluster management and job scheduling software associated with the cluster.", + "Size": "The size of the cluster.", + "SlurmConfiguration": "Additional options related to the Slurm scheduler.", + "Tags": "1 or more tags added to the resource. Each tag consists of a tag key and tag value. The tag value is optional and can be an empty string." + }, + "AWS::PCS::Cluster AuthKey": { + "SecretArn": "The Amazon Resource Name (ARN) of the shared Slurm key.", + "SecretVersion": "The version of the shared Slurm key." + }, + "AWS::PCS::Cluster Endpoint": { + "Port": "The endpoint's connection port number.", + "PrivateIpAddress": "The endpoint's private IP address.", + "PublicIpAddress": "The endpoint's public IP address.", + "Type": "Indicates the type of endpoint running at the specific IP address." + }, + "AWS::PCS::Cluster ErrorInfo": { + "Code": "The short-form error code.", + "Message": "The detailed error information." + }, + "AWS::PCS::Cluster Networking": { + "SecurityGroupIds": "The list of security group IDs associated with the Elastic Network Interface (ENI) created in subnets.", + "SubnetIds": "The list of subnet IDs where AWS PCS creates an Elastic Network Interface (ENI) to enable communication between managed controllers and AWS PCS resources. The subnet must have an available IP address and cannot reside in AWS Outposts, AWS Wavelength, or an AWS Local Zone. AWS PCS currently supports only 1 subnet in this list." 
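A minimal sketch of the AWS::PCS::Cluster shape described above; the scheduler type and version, the cluster size value, and all IDs are assumed placeholders rather than values confirmed by this excerpt.

    {
      "DemoHpcCluster": {
        "Type": "AWS::PCS::Cluster",
        "Properties": {
          "Name": "demo-cluster",
          "Scheduler": { "Type": "SLURM", "Version": "23.11" },
          "Size": "SMALL",
          "Networking": {
            "SubnetIds": [ "subnet-0123456789abcdef0" ],
            "SecurityGroupIds": [ "sg-0123456789abcdef0" ]
          },
          "SlurmConfiguration": {
            "ScaleDownIdleTimeInSeconds": 600,
            "SlurmCustomSettings": [ { "ParameterName": "Prolog", "ParameterValue": "/opt/slurm/etc/prolog.sh" } ]
          }
        }
      }
    }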
+ }, + "AWS::PCS::Cluster Scheduler": { + "Type": "The software AWS PCS uses to manage cluster scaling and job scheduling.", + "Version": "The version of the specified scheduling software that AWS PCS uses to manage cluster scaling and job scheduling." + }, + "AWS::PCS::Cluster SlurmConfiguration": { + "AuthKey": "The shared Slurm key for authentication, also known as the cluster secret.", + "ScaleDownIdleTimeInSeconds": "The time before an idle node is scaled down.", + "SlurmCustomSettings": "Additional Slurm-specific configuration that directly maps to Slurm settings." + }, + "AWS::PCS::Cluster SlurmCustomSetting": { + "ParameterName": "AWS PCS supports configuration of the following Slurm parameters:\n\n- For *clusters*\n\n- [`Prolog`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_Prolog_1)\n- [`Epilog`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_Epilog_1)\n- [`SelectTypeParameters`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_SelectTypeParameters)\n- For *compute node groups*\n\n- [`Weight`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_Weight)\n- [`RealMemory`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_RealMemory)", + "ParameterValue": "The values for the configured Slurm settings." + }, + "AWS::PCS::ComputeNodeGroup": { + "AmiId": "The ID of the Amazon Machine Image (AMI) that AWS PCS uses to launch instances. If not provided, AWS PCS uses the AMI ID specified in the custom launch template.", + "ClusterId": "The ID of the cluster of the compute node group.", + "CustomLaunchTemplate": "An Amazon EC2 launch template AWS PCS uses to launch compute nodes.", + "IamInstanceProfileArn": "The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision instances correctly.", + "InstanceConfigs": "A list of EC2 instance configurations that AWS PCS can provision in the compute node group.", + "Name": "The name that identifies the compute node group.", + "PurchaseOption": "Specifies how EC2 instances are purchased on your behalf. AWS PCS supports On-Demand and Spot instances. For more information, see Instance purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide this option, it defaults to On-Demand.", + "ScalingConfiguration": "Specifies the boundaries of the compute node group auto scaling.", + "SlurmConfiguration": "Additional options related to the Slurm scheduler.", + "SpotOptions": "Additional configuration when you specify `SPOT` as the `purchaseOption` .", + "SubnetIds": "The list of subnet IDs where instances are provisioned by the compute node group. The subnets must be in the same VPC as the cluster.", + "Tags": "1 or more tags added to the resource. Each tag consists of a tag key and tag value. The tag value is optional and can be an empty string." + }, + "AWS::PCS::ComputeNodeGroup CustomLaunchTemplate": { + "Id": "The ID of the EC2 launch template to use to provision instances.", + "Version": "The version of the EC2 launch template to use to provision instances." + }, + "AWS::PCS::ComputeNodeGroup ErrorInfo": { + "Code": "The short-form error code.", + "Message": "The detailed error information." 
+ }, + "AWS::PCS::ComputeNodeGroup InstanceConfig": { + "InstanceType": "The EC2 instance type that AWS PCS can provision in the compute node group.\n\nExample: `t2.xlarge`" + }, + "AWS::PCS::ComputeNodeGroup ScalingConfiguration": { + "MaxInstanceCount": "The upper bound of the number of instances allowed in the compute fleet.", + "MinInstanceCount": "The lower bound of the number of instances allowed in the compute fleet." + }, + "AWS::PCS::ComputeNodeGroup SlurmConfiguration": { + "SlurmCustomSettings": "Additional Slurm-specific configuration that directly maps to Slurm settings." + }, + "AWS::PCS::ComputeNodeGroup SlurmCustomSetting": { + "ParameterName": "AWS PCS supports configuration of the following Slurm parameters:\n\n- For *clusters*\n\n- [`Prolog`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_Prolog_1)\n- [`Epilog`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_Epilog_1)\n- [`SelectTypeParameters`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_SelectTypeParameters)\n- For *compute node groups*\n\n- [`Weight`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_Weight)\n- [`RealMemory`](https://docs.aws.amazon.com/https://slurm.schedmd.com/slurm.conf.html#OPT_RealMemory)", + "ParameterValue": "The values for the configured Slurm settings." + }, + "AWS::PCS::ComputeNodeGroup SpotOptions": { + "AllocationStrategy": "The Amazon EC2 allocation strategy AWS PCS uses to provision EC2 instances. AWS PCS supports lowest price, capacity optimized, and price capacity optimized. If you don't provide this option, it defaults to price capacity optimized." + }, + "AWS::PCS::Queue": { + "ClusterId": "The ID of the cluster of the queue.", + "ComputeNodeGroupConfigurations": "The list of compute node group configurations associated with the queue. Queues assign jobs to associated compute node groups.", + "Name": "The name that identifies the queue.", + "Tags": "1 or more tags added to the resource. Each tag consists of a tag key and tag value. The tag value is optional and can be an empty string." + }, + "AWS::PCS::Queue ComputeNodeGroupConfiguration": { + "ComputeNodeGroupId": "The compute node group ID for the compute node group configuration." + }, + "AWS::PCS::Queue ErrorInfo": { + "Code": "The short-form error code.", + "Message": "The detailed error information." + }, "AWS::Panorama::ApplicationInstance": { "ApplicationInstanceIdToReplace": "The ID of an application instance to replace with the new instance.", "DefaultRuntimeContextDevice": "The device's ID.", @@ -31223,7 +31937,7 @@ "KinesisStreamParameters": "The parameters for using a Kinesis stream as a target.", "LambdaFunctionParameters": "The parameters for using a Lambda function as a target.", "RedshiftDataParameters": "These are custom parameters to be used when the target is an Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement.", - "SageMakerPipelineParameters": "The parameters for using a SageMaker pipeline as a target.", + "SageMakerPipelineParameters": "The parameters for using a SageMaker AI pipeline as a target.", "SqsQueueParameters": "The parameters for using an Amazon SQS stream as a target.", "StepFunctionStateMachineParameters": "The parameters for using a Step Functions state machine as a target.", "TimestreamParameters": "The parameters for using a Timestream for LiveAnalytics table as a target." 
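The SageMaker AI pipeline target wording above corresponds to a small fragment of an AWS::Pipes::Pipe resource. This sketch assumes the standard `TargetParameters` nesting for pipe targets; the parameter name and value are placeholders.

    "TargetParameters": {
      "SageMakerPipelineParameters": {
        "PipelineParameterList": [
          { "Name": "InputDataS3Uri", "Value": "s3://amzn-s3-demo-bucket/input/" }
        ]
      }
    }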
@@ -31237,7 +31951,7 @@ "WithEvent": "Indicates whether to send an event back to EventBridge after the SQL statement runs." }, "AWS::Pipes::Pipe PipeTargetSageMakerPipelineParameters": { - "PipelineParameterList": "List of Parameter names and values for SageMaker Model Building Pipeline execution." + "PipelineParameterList": "List of Parameter names and values for SageMaker AI Model Building Pipeline execution." }, "AWS::Pipes::Pipe PipeTargetSqsQueueParameters": { "MessageDeduplicationId": "This parameter applies only to FIFO (first-in-first-out) queues.\n\nThe token used for deduplication of sent messages.", @@ -31271,8 +31985,8 @@ "Prefix": "The prefix text with which to begin Amazon S3 log object names.\n\nFor more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) in the *Amazon Simple Storage Service User Guide* ." }, "AWS::Pipes::Pipe SageMakerPipelineParameter": { - "Name": "Name of parameter to start execution of a SageMaker Model Building Pipeline.", - "Value": "Value of parameter to start execution of a SageMaker Model Building Pipeline." + "Name": "Name of parameter to start execution of a SageMaker AI Model Building Pipeline.", + "Value": "Value of parameter to start execution of a SageMaker AI Model Building Pipeline." }, "AWS::Pipes::Pipe SelfManagedKafkaAccessConfigurationCredentials": { "BasicAuth": "The ARN of the Secrets Manager secret.", @@ -31333,7 +32047,7 @@ "AWS::QBusiness::Application": { "AttachmentsConfiguration": "Configuration information for the file upload during chat feature.", "AutoSubscriptionConfiguration": "Subscription configuration information for an Amazon Q Business application using IAM identity federation for user management.", - "ClientIdsForOIDC": "", + "ClientIdsForOIDC": "The OIDC client ID for an Amazon Q Business application.", "Description": "A description for the Amazon Q Business application.", "DisplayName": "The name of the Amazon Q Business application.", "EncryptionConfiguration": "Provides the identifier of the AWS KMS key used to encrypt data indexed by Amazon Q Business. Amazon Q Business doesn't support asymmetric keys.", "IdentityType": "The authentication type being used by an Amazon Q Business application.", "PersonalizationConfiguration": "Configuration information about chat response personalization. For more information, see [Personalizing chat responses](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/personalizing-chat-responses.html) .", "QAppsConfiguration": "Configuration information about Amazon Q Apps.", + "QuickSightConfiguration": "The Amazon QuickSight configuration for an Amazon Q Business application that uses QuickSight as the identity provider.", "RoleArn": "The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. If this property is not specified, Amazon Q Business will create a [service linked role (SLR)](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/using-service-linked-roles.html#slr-permissions) and use it as the application's role.", "Tags": "A list of key-value pairs that identify or categorize your Amazon Q Business application. You can also use tags to help control access to the application. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @." 
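For the new QuickSightConfiguration property above, a hypothetical application sketch might look as follows. The `IdentityType` value shown and the role ARN are assumptions for illustration, not values confirmed by this excerpt.

    {
      "DemoQBusinessApp": {
        "Type": "AWS::QBusiness::Application",
        "Properties": {
          "DisplayName": "support-assistant",
          "IdentityType": "AWS_QUICKSIGHT_IDP",
          "QuickSightConfiguration": { "ClientNamespace": "default" },
          "RoleArn": "arn:aws:iam::111122223333:role/QBusinessAppRole"
        }
      }
    }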
}, @@ -31361,10 +32076,53 @@ "AWS::QBusiness::Application QAppsConfiguration": { "QAppsControlMode": "Status information about whether end users can create and use Amazon Q Apps in the web experience." }, + "AWS::QBusiness::Application QuickSightConfiguration": { + "ClientNamespace": "The Amazon QuickSight namespace that is used as the identity provider. For more information about QuickSight namespaces, see [Namespace operations](https://docs.aws.amazon.com/quicksight/latest/developerguide/namespace-operations.html) ." + }, "AWS::QBusiness::Application Tag": { "Key": "The key for the tag. Keys are not case sensitive and must be unique for the Amazon Q Business application or data source.", "Value": "The value associated with the tag. The value may be an empty string but it can't be null." }, + "AWS::QBusiness::DataAccessor": { + "ActionConfigurations": "A list of action configurations specifying the allowed actions and any associated filters.", + "ApplicationId": "The unique identifier of the Amazon Q Business application.", + "DisplayName": "The friendly name of the data accessor.", + "Principal": "The Amazon Resource Name (ARN) of the IAM role for the ISV associated with this data accessor.", + "Tags": "The tags to associate with the data accessor." + }, + "AWS::QBusiness::DataAccessor ActionConfiguration": { + "Action": "The Amazon Q Business action that is allowed.", + "FilterConfiguration": "The filter configuration for the action, if any." + }, + "AWS::QBusiness::DataAccessor ActionFilterConfiguration": { + "DocumentAttributeFilter": "Enables filtering of responses based on document attributes or metadata fields." + }, + "AWS::QBusiness::DataAccessor AttributeFilter": { + "AndAllFilters": "Performs a logical `AND` operation on all supplied filters.", + "ContainsAll": "Returns `true` when a document contains all the specified document attributes or metadata fields. Supported for the following [document attribute value types](https://docs.aws.amazon.com/amazonq/latest/api-reference/API_DocumentAttributeValue.html) : `stringListValue` .", + "ContainsAny": "Returns `true` when a document contains any of the specified document attributes or metadata fields. Supported for the following [document attribute value types](https://docs.aws.amazon.com/amazonq/latest/api-reference/API_DocumentAttributeValue.html) : `stringListValue` .", + "EqualsTo": "Performs an equals operation on two document attributes or metadata fields. Supported for the following [document attribute value types](https://docs.aws.amazon.com/amazonq/latest/api-reference/API_DocumentAttributeValue.html) : `dateValue` , `longValue` , `stringListValue` and `stringValue` .", + "GreaterThan": "Performs a greater than operation on two document attributes or metadata fields. Supported for the following [document attribute value types](https://docs.aws.amazon.com/amazonq/latest/api-reference/API_DocumentAttributeValue.html) : `dateValue` and `longValue` .", + "GreaterThanOrEquals": "Performs a greater than or equals operation on two document attributes or metadata fields. Supported for the following [document attribute value types](https://docs.aws.amazon.com/amazonq/latest/api-reference/API_DocumentAttributeValue.html) : `dateValue` and `longValue` .", + "LessThan": "Performs a less than operation on two document attributes or metadata fields. 
Supported for the following [document attribute value types](https://docs.aws.amazon.com/amazonq/latest/api-reference/API_DocumentAttributeValue.html) : `dateValue` and `longValue` .", + "LessThanOrEquals": "Performs a less than or equals operation on two document attributes or metadata fields. Supported for the following [document attribute value types](https://docs.aws.amazon.com/amazonq/latest/api-reference/API_DocumentAttributeValue.html) : `dateValue` and `longValue` .", + "NotFilter": "Performs a logical `NOT` operation on all supplied filters.", + "OrAllFilters": "Performs a logical `OR` operation on all supplied filters." + }, + "AWS::QBusiness::DataAccessor DocumentAttribute": { + "Name": "The identifier for the attribute.", + "Value": "The value of the attribute." + }, + "AWS::QBusiness::DataAccessor DocumentAttributeValue": { + "DateValue": "A date expressed as an ISO 8601 string.\n\nIt's important for the time zone to be included in the ISO 8601 date-time format. For example, 2012-03-25T12:30:10+01:00 is the ISO 8601 date-time format for March 25th 2012 at 12:30PM (plus 10 seconds) in Central European Time.", + "LongValue": "A long integer value.", + "StringListValue": "A list of strings.", + "StringValue": "A string." + }, + "AWS::QBusiness::DataAccessor Tag": { + "Key": "The key for the tag. Keys are not case sensitive and must be unique for the Amazon Q Business application or data source.", + "Value": "The value associated with the tag. The value may be an empty string but it can't be null." + }, "AWS::QBusiness::DataSource": { "ApplicationId": "The identifier of the Amazon Q Business application the data source will be attached to.", "Configuration": "Use this property to specify a JSON or YAML schema with configuration properties specific to your data source connector to connect your data source repository to Amazon Q Business . 
You must use the JSON or YAML schema provided by Amazon Q .\n\nThe following links have the configuration properties and schemas for AWS CloudFormation for the following connectors:\n\n- [Amazon Simple Storage Service](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-cfn.html)\n- [Amazon Q Web Crawler](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-cfn.html)\n\nSimilarly, you can find configuration templates and properties for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, choose the topic containing *Using AWS CloudFormation* to find the schemas for your data source connector, including configuration parameter descriptions and examples.", @@ -31372,6 +32130,7 @@ "DisplayName": "The name of the Amazon Q Business data source.", "DocumentEnrichmentConfiguration": "Provides the configuration information for altering document metadata and content during the document ingestion process.\n\nFor more information, see [Custom document enrichment](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/custom-document-enrichment.html) .", "IndexId": "The identifier of the index the data source is attached to.", + "MediaExtractionConfiguration": "The configuration for extracting information from media in documents.", "RoleArn": "The Amazon Resource Name (ARN) of an IAM role with permission to access the data source and required resources.", "SyncSchedule": "Sets the frequency for Amazon Q Business to check the documents in your data source repository and update your index. If you don't set a schedule, Amazon Q Business won't periodically update the index.\n\nSpecify a `cron-` format schedule string or an empty string to indicate that the index is updated on demand. You can't specify the `Schedule` parameter when the `Type` parameter is set to `CUSTOM` . If you do, you receive a `ValidationException` exception.", "Tags": "A list of key-value pairs that identify or categorize the data source connector. You can also use tags to help control access to the data source connector. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @.", @@ -31408,11 +32167,17 @@ "RoleArn": "The Amazon Resource Name (ARN) of a role with permission to run `PreExtractionHookConfiguration` and `PostExtractionHookConfiguration` for altering document metadata and content during the document ingestion process.", "S3BucketName": "Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see [Data contracts for Lambda functions](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/cde-lambda-operations.html#cde-lambda-operations-data-contracts) ." }, + "AWS::QBusiness::DataSource ImageExtractionConfiguration": { + "ImageExtractionStatus": "Specify whether to extract semantic meaning from images and visuals from documents." 
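The image extraction entries above compose as a small fragment inside an AWS::QBusiness::DataSource's properties, wrapped by the `MediaExtractionConfiguration` object documented just below. The `ENABLED` status value is an assumed enum, not confirmed by this excerpt.

    "MediaExtractionConfiguration": {
      "ImageExtractionConfiguration": {
        "ImageExtractionStatus": "ENABLED"
      }
    }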
+ }, "AWS::QBusiness::DataSource InlineDocumentEnrichmentConfiguration": { "Condition": "Configuration of the condition used for the target document attribute or metadata field when ingesting documents into Amazon Q Business .", "DocumentContentOperator": "`TRUE` to delete content if the condition used for the target attribute is met.", "Target": "Configuration of the target document attribute or metadata field when ingesting documents into Amazon Q Business . You can also include a value." }, + "AWS::QBusiness::DataSource MediaExtractionConfiguration": { + "ImageExtractionConfiguration": "The configuration for extracting semantic meaning from images in documents. For more information, see [Extracting semantic meaning from images and visuals](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/extracting-meaning-from-images.html) ." + }, "AWS::QBusiness::DataSource Tag": { "Key": "The key for the tag. Keys are not case sensitive and must be unique for the Amazon Q Business application or data source.", "Value": "The value associated with the tag. The value may be an empty string but it can't be null." @@ -31445,6 +32210,12 @@ "IndexedTextBytes": "The total size, in bytes, of the indexed documents.", "IndexedTextDocumentCount": "The number of text documents indexed." }, + "AWS::QBusiness::Permission": { + "Actions": "The list of Amazon Q Business actions that the ISV is allowed to perform.", + "ApplicationId": "The unique identifier of the Amazon Q Business application.", + "Principal": "Provides user and group information used for filtering documents to use for generating Amazon Q Business conversation responses.", + "StatementId": "A unique identifier for the policy statement." + }, "AWS::QBusiness::Plugin": { "ApplicationId": "The identifier of the application that will contain the plugin.", "AuthConfiguration": "Authentication configuration information for an Amazon Q Business plugin.", @@ -31469,8 +32240,10 @@ "Description": "A description for your custom plugin configuration." }, "AWS::QBusiness::Plugin OAuth2ClientCredentialConfiguration": { + "AuthorizationUrl": "The redirect URL required by the OAuth 2.0 protocol for Amazon Q Business to authenticate a plugin user through a third party authentication server.", "RoleArn": "The ARN of an IAM role used by Amazon Q Business to access the OAuth 2.0 authentication credentials stored in a Secrets Manager secret.", - "SecretArn": "The ARN of the Secrets Manager secret that stores the OAuth 2.0 credentials/token used for plugin configuration." + "SecretArn": "The ARN of the Secrets Manager secret that stores the OAuth 2.0 credentials/token used for plugin configuration.", + "TokenUrl": "The URL required by the OAuth 2.0 protocol to exchange an end user authorization code for an access token." 
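The new AuthorizationUrl and TokenUrl properties slot into a plugin's auth configuration roughly as below; the surrounding `AuthConfiguration` nesting is assumed from the PluginAuthConfiguration entry that follows, and all URLs and ARNs are placeholders.

    "AuthConfiguration": {
      "OAuth2ClientCredentialConfiguration": {
        "AuthorizationUrl": "https://idp.example.com/oauth2/authorize",
        "TokenUrl": "https://idp.example.com/oauth2/token",
        "RoleArn": "arn:aws:iam::111122223333:role/QBusinessPluginRole",
        "SecretArn": "arn:aws:secretsmanager:us-east-1:111122223333:secret:plugin-oauth-abc123"
      }
    }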
}, "AWS::QBusiness::Plugin PluginAuthConfiguration": { "BasicAuthConfiguration": "Information about the basic authentication credentials used to configure a plugin.", @@ -31509,6 +32282,7 @@ }, "AWS::QBusiness::WebExperience": { "ApplicationId": "The identifier of the Amazon Q Business web experience.", + "CustomizationConfiguration": "Contains the configuration information to customize the logo, font, and color of an Amazon Q Business web experience with individual files for each property or a CSS file for them all.", "IdentityProviderConfiguration": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience.", "Origins": "Sets the website domain origins that are allowed to embed the Amazon Q Business web experience. The *domain origin* refers to the base URL for accessing a website including the protocol ( `http/https` ), the domain name, and the port number (if specified).\n\n> You must only submit a *base URL* and not a full path. For example, `https://docs.aws.amazon.com` .", "RoleArn": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n> You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value.", @@ -31518,9 +32292,15 @@ "Title": "The title for your Amazon Q Business web experience.", "WelcomeMessage": "A message in an Amazon Q Business web experience." }, + "AWS::QBusiness::WebExperience CustomizationConfiguration": { + "CustomCSSUrl": "Provides the URL where the custom CSS file is hosted for an Amazon Q web experience.", + "FaviconUrl": "Provides the URL where the custom favicon file is hosted for an Amazon Q web experience.", + "FontUrl": "Provides the URL where the custom font file is hosted for an Amazon Q web experience.", + "LogoUrl": "Provides the URL where the custom logo file is hosted for an Amazon Q web experience." + }, "AWS::QBusiness::WebExperience IdentityProviderConfiguration": { - "OpenIDConnectConfiguration": "", - "SamlConfiguration": "" + "OpenIDConnectConfiguration": "The OIDC-compliant identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience.", + "SamlConfiguration": "The SAML 2.0-compliant identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience." }, "AWS::QBusiness::WebExperience OpenIDConnectProviderConfiguration": { "SecretsArn": "The Amazon Resource Name (ARN) of a Secrets Manager secret containing the OIDC client secret.", @@ -34176,6 +34956,35 @@ "AWS::QuickSight::Analysis YAxisOptions": { "YAxis": "The Y axis type to be used in the chart.\n\nIf you choose `PRIMARY_Y_AXIS` , the primary Y Axis is located on the leftmost vertical axis of the chart." }, + "AWS::QuickSight::CustomPermissions": { + "AwsAccountId": "The ID of the AWS account that contains the custom permission configuration that you want to update.", + "Capabilities": "A set of actions in the custom permissions profile.", + "CustomPermissionsName": "The name of the custom permissions profile.", + "Tags": "The tags to associate with the custom permissions profile." 
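A hypothetical sketch of the new AWS::QuickSight::CustomPermissions resource; the available capability fields are enumerated in the Capabilities entry that follows, and the `DENY` values are an assumption about the expected string format, not confirmed by this excerpt.

    {
      "DemoCustomPermissions": {
        "Type": "AWS::QuickSight::CustomPermissions",
        "Properties": {
          "AwsAccountId": "111122223333",
          "CustomPermissionsName": "restricted-authors",
          "Capabilities": {
            "ExportToCsv": "DENY",
            "ShareDashboards": "DENY"
          }
        }
      }
    }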
+ }, + "AWS::QuickSight::CustomPermissions Capabilities": { + "AddOrRunAnomalyDetectionForAnalyses": "The ability to add or run anomaly detection.", + "CreateAndUpdateDashboardEmailReports": "The ability to create and update email reports.", + "CreateAndUpdateDataSources": "The ability to create and update data sources.", + "CreateAndUpdateDatasets": "The ability to create and update datasets.", + "CreateAndUpdateThemes": "The ability to export to Create and Update themes.", + "CreateAndUpdateThresholdAlerts": "The ability to create and update threshold alerts.", + "CreateSPICEDataset": "The ability to create a SPICE dataset.", + "CreateSharedFolders": "The ability to create shared folders.", + "ExportToCsv": "The ability to export to CSV files.", + "ExportToExcel": "The ability to export to Excel files.", + "RenameSharedFolders": "The ability to rename shared folders.", + "ShareAnalyses": "The ability to share analyses.", + "ShareDashboards": "The ability to share dashboards.", + "ShareDataSources": "The ability to share data sources.", + "ShareDatasets": "The ability to share datasets.", + "SubscribeDashboardEmailReports": "The ability to subscribe to email reports.", + "ViewAccountSPICECapacity": "The ability to view account SPICE capacity." + }, + "AWS::QuickSight::CustomPermissions Tag": { + "Key": "The unique key for a tag.", + "Value": "" + }, "AWS::QuickSight::Dashboard": { "AwsAccountId": "The ID of the AWS account where you want to create the dashboard.", "DashboardId": "The ID for the dashboard, also added to the IAM policy.", @@ -36866,6 +37675,7 @@ "IngestionWaitPolicy": "The wait policy to use when creating or updating a Dataset. The default is to wait for SPICE ingestion to finish with timeout of 36 hours.", "LogicalTableMap": "Configures the combination and transformation of the data from the physical tables.", "Name": "The display name for the dataset.", + "PerformanceConfiguration": "The performance optimization configuration of a dataset.", "Permissions": "A list of resource permissions on the dataset.", "PhysicalTableMap": "Declares the physical tables that are available in the underlying data sources.", "RowLevelPermissionDataSet": "The row-level security configuration for the data that you want to create.", @@ -37014,6 +37824,9 @@ "NewParameterName": "The new name for the parameter.", "ParameterName": "The name of the parameter to be overridden with different values." }, + "AWS::QuickSight::DataSet PerformanceConfiguration": { + "UniqueKeys": "" + }, "AWS::QuickSight::DataSet PhysicalTable": { "CustomSql": "A physical table type built from the results of the custom SQL query.", "RelationalTable": "A physical table type for relational data sources.", @@ -37090,17 +37903,13 @@ "TagColumnOperation": "An operation that tags a column with additional information.", "UntagColumnOperation": "" }, + "AWS::QuickSight::DataSet UniqueKey": { + "ColumnNames": "" + }, "AWS::QuickSight::DataSet UntagColumnOperation": { "ColumnName": "The column that this operation acts on.", "TagNames": "The column tags to remove from this column." }, - "AWS::QuickSight::DataSet UploadSettings": { - "ContainsHeader": "Whether the file has a header row, or the files each have a header row.", - "Delimiter": "The delimiter between values in the file.", - "Format": "File format.", - "StartFromRow": "A row number to start reading data from.", - "TextQualifier": "Text qualifier." 
- }, "AWS::QuickSight::DataSource": { "AlternateDataSourceParameters": "A set of alternate data source parameters that you want to share for the credentials stored with this data source. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API operation compares the `DataSourceParameters` structure that's in the request with the structures in the `AlternateDataSourceParameters` allow list. If the structures are an exact match, the request is allowed to use the credentials from this existing data source. If the `AlternateDataSourceParameters` list is null, the `Credentials` originally used with this `DataSourceParameters` are automatically allowed.", "AwsAccountId": "The AWS account ID.", @@ -40279,7 +41088,7 @@ "AWS::RDS::DBCluster": { "AllocatedStorage": "The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nThis setting is required to create a Multi-AZ DB cluster.", "AssociatedRoles": "Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "AutoMinorVersionUpgrade": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "AutoMinorVersionUpgrade": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB cluster", "AvailabilityZones": "A list of Availability Zones (AZs) where instances in the DB cluster can be created. For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "BackupRetentionPeriod": "The number of days for which automated backups are retained.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 1 to 35\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -40291,6 +41100,7 @@ "DBInstanceParameterGroupName": "The name of the DB parameter group to apply to all instances of the DB cluster.\n\n> When you apply a parameter group using the `DBInstanceParameterGroupName` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. 
\n\nValid for Cluster Type: Aurora DB clusters only\n\nDefault: The existing name setting\n\nConstraints:\n\n- The DB parameter group must be in the same DB parameter group family as this DB cluster.\n- The `DBInstanceParameterGroupName` parameter is valid in combination with the `AllowMajorVersionUpgrade` parameter for a major version upgrade only.", "DBSubnetGroupName": "A DB subnet group that you want to associate with this DB cluster.\n\nIf you are restoring a DB cluster to a point in time with `RestoreType` set to `copy-on-write` , and don't specify a DB subnet group name, then the DB cluster is restored with a default DB subnet group.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBSystemId": "Reserved for future use.", + "DatabaseInsightsMode": "The mode of Database Insights to enable for the DB cluster.\n\nIf you set this value to `advanced` , you must also set the `PerformanceInsightsEnabled` parameter to `true` and the `PerformanceInsightsRetentionPeriod` parameter to 465.\n\nValid for Cluster Type: Aurora DB clusters only", "DatabaseName": "The name of your database. If you don't provide a name, then Amazon RDS won't create a database in this DB cluster. For naming constraints, see [Naming Constraints](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DeletionProtection": "A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "Domain": "Indicates the directory ID of the Active Directory to create the DB cluster.\n\nFor Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.\n\nFor more information, see [Kerberos authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", @@ -40309,14 +41119,14 @@ "KmsKeyId": "The Amazon Resource Name (ARN) of the AWS KMS key that is used to encrypt the database instances in the DB cluster, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the `StorageEncrypted` property but don't specify this property, the default KMS key is used. If you specify this property, you must set the `StorageEncrypted` property to `true` .\n\nIf you specify the `SnapshotIdentifier` property, the `StorageEncrypted` property value is inherited from the snapshot, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.\n\nIf you create a read replica of an encrypted DB cluster in another AWS Region, make sure to set `KmsKeyId` to a KMS key identifier that is valid in the destination AWS Region. 
This KMS key is used to encrypt the read replica in that AWS Region.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "ManageMasterUserPassword": "Specifies whether to manage the master user password with AWS Secrets Manager.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nConstraints:\n\n- Can't manage the master user password with AWS Secrets Manager if `MasterUserPassword` is specified.", "MasterUserPassword": "The master password for the DB instance.\n\n> If you specify the `SourceDBClusterIdentifier` , `SnapshotIdentifier` , or `GlobalClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, the snapshot, or the primary DB cluster for the global database cluster, respectively. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "MasterUserSecret": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*", + "MasterUserSecret": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\n> When you restore a DB cluster from a snapshot, Amazon RDS generates a new secret instead of reusing the secret specified in the `SecretArn` property. This ensures that the restored DB cluster is securely managed with a dedicated secret. To maintain consistent integration with your application, you might need to update resource configurations to reference the newly created secret. \n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*", "MasterUsername": "The name of the master user for the DB cluster.\n\n> If you specify the `SourceDBClusterIdentifier` , `SnapshotIdentifier` , or `GlobalClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, the snapshot, or the primary DB cluster for the global database cluster, respectively. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, also set `MonitoringInterval` to a value other than `0` .\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", - "MonitoringRoleArn": "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. 
An example is `arn:aws:iam:123456789012:role/emaccess` . For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , supply a `MonitoringRoleArn` value.\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, also set `MonitoringInterval` to a value other than `0` .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", + "MonitoringRoleArn": "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is `arn:aws:iam:123456789012:role/emaccess` . For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , supply a `MonitoringRoleArn` value.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "NetworkType": "The network type of the DB cluster.\n\nValid values:\n\n- `IPV4`\n- `DUAL`\n\nThe network type is determined by the `DBSubnetGroup` specified for the DB cluster. A `DBSubnetGroup` can support only the IPv4 protocol or the IPv4 and IPv6 protocols ( `DUAL` ).\n\nFor more information, see [Working with a DB instance in a VPC](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the *Amazon Aurora User Guide.*\n\nValid for: Aurora DB clusters only", - "PerformanceInsightsEnabled": "Specifies whether to turn on Performance Insights for the DB cluster.\n\nFor more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nValid for Cluster Type: Multi-AZ DB clusters only", - "PerformanceInsightsKmsKeyId": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you don't specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account . Your AWS account has a different default KMS key for each AWS Region .\n\nValid for Cluster Type: Multi-AZ DB clusters only", - "PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. 
Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS issues an error.", + "PerformanceInsightsEnabled": "Specifies whether to turn on Performance Insights for the DB cluster.\n\nFor more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", + "PerformanceInsightsKmsKeyId": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you don't specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account . Your AWS account has a different default KMS key for each AWS Region .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", + "PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS issues an error.", "Port": "The port number on which the DB instances in the DB cluster accept connections.\n\nDefault:\n\n- When `EngineMode` is `provisioned` , `3306` (for both Aurora MySQL and Aurora PostgreSQL)\n- When `EngineMode` is `serverless` :\n\n- `3306` when `Engine` is `aurora` or `aurora-mysql`\n- `5432` when `Engine` is `aurora-postgresql`\n\n> The `No interruption` on update behavior only applies to DB clusters. If you are updating a DB instance, see [Port](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-port) for the AWS::RDS::DBInstance resource. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredBackupWindow": "The daily time range during which automated backups are created. For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the *Amazon Aurora User Guide.*\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. 
To see the time blocks available, see [Adjusting the Preferred DB Cluster Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the *Amazon Aurora User Guide.*\n\nValid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n\nConstraints: Minimum 30-minute window.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -40361,7 +41171,8 @@ }, "AWS::RDS::DBCluster ServerlessV2ScalingConfiguration": { "MaxCapacity": "The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 40, 40.5, 41, and so on. The largest value that you can use is 128.\n\nThe maximum capacity must be higher than 0.5 ACUs. For more information, see [Choosing the maximum Aurora Serverless v2 capacity setting for a cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.setting-capacity.html#aurora-serverless-v2.max_capacity_considerations) in the *Amazon Aurora User Guide* .\n\nAurora automatically sets certain parameters for Aurora Serverless V2 DB instances to values that depend on the maximum ACU value in the capacity range. When you update the maximum capacity value, the `ParameterApplyStatus` value for the DB instance changes to `pending-reboot` . You can update the parameter values by rebooting the DB instance after changing the capacity range.", - "MinCapacity": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. For Aurora versions that support the Aurora Serverless v2 auto-pause feature, the smallest value that you can use is 0. For versions that don't support Aurora Serverless v2 auto-pause, the smallest value that you can use is 0.5." + "MinCapacity": "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. For Aurora versions that support the Aurora Serverless v2 auto-pause feature, the smallest value that you can use is 0. For versions that don't support Aurora Serverless v2 auto-pause, the smallest value that you can use is 0.5.", + "SecondsUntilAutoPause": "Specifies the number of seconds an Aurora Serverless v2 DB instance must be idle before Aurora attempts to automatically pause it.\n\nSpecify a value between 300 seconds (five minutes) and 86,400 seconds (one day). The default is 300 seconds." }, "AWS::RDS::DBCluster Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -40700,13 +41511,13 @@ }, "AWS::Rbin::Rule": { "Description": "The retention rule description.", - "ExcludeResourceTags": "", + "ExcludeResourceTags": "[Region-level retention rules only] Specifies the exclusion tags to use to identify resources that are to be excluded, or ignored, by a Region-level retention rule. 
Resources that have any of these tags are not retained by the retention rule upon deletion.\n\nYou can't specify exclusion tags for tag-level retention rules.", "LockConfiguration": "Information about the retention rule lock configuration.", - "ResourceTags": "[Tag-level retention rules only] Information about the resource tags used to identify resources that are retained by the retention rule.", - "ResourceType": "The resource type retained by the retention rule.", + "ResourceTags": "[Tag-level retention rules only] Specifies the resource tags to use to identify resources that are to be retained by a tag-level retention rule. For tag-level retention rules, only deleted resources, of the specified resource type, that have one or more of the specified tag key and value pairs are retained. If a resource is deleted, but it does not have any of the specified tag key and value pairs, it is immediately deleted without being retained by the retention rule.\n\nYou can add the same tag key and value pair to a maximum of five retention rules.\n\nTo create a Region-level retention rule, omit this parameter. A Region-level retention rule does not have any resource tags specified. It retains all deleted resources of the specified resource type in the Region in which the rule is created, even if the resources are not tagged.", + "ResourceType": "The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots and EBS-backed AMIs are supported. To retain snapshots, specify `EBS_SNAPSHOT` . To retain EBS-backed AMIs, specify `EC2_IMAGE` .", "RetentionPeriod": "Information about the retention period for which the retention rule is to retain resources.", "Status": "The state of the retention rule. Only retention rules that are in the `available` state retain resources.", - "Tags": "Information about the tags assigned to the retention rule." + "Tags": "Information about the tags to assign to the retention rule." }, "AWS::Rbin::Rule ResourceTag": { "ResourceTagKey": "The tag key.", @@ -40961,6 +41772,7 @@ "MaxCapacity": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", "NamespaceName": "The namespace the workgroup is associated with.", "Port": "The custom port to use when connecting to a workgroup. Valid port ranges are 5431-5455 and 8191-8215. The default is 5439.", + "PricePerformanceTarget": "An object that represents the price performance target settings for the workgroup.", "PubliclyAccessible": "A value that specifies whether the workgroup can be accessible from a public network.", "SecurityGroupIds": "A list of security group IDs to associate with the workgroup.", "SubnetIds": "A list of subnet IDs the workgroup is associated with.", @@ -40982,6 +41794,10 @@ "PrivateIpAddress": "The IPv4 address of the network interface within the subnet.", "SubnetId": "The unique identifier of the subnet." }, + "AWS::RedshiftServerless::Workgroup PerformanceTarget": { + "Level": "The target price performance level for the workgroup. Valid values include 1, 25, 50, 75, and 100. These correspond to the price performance levels LOW_COST, ECONOMICAL, BALANCED, RESOURCEFUL, and HIGH_PERFORMANCE.", + "Status": "Whether the price performance target is enabled for the workgroup." + }, "AWS::RedshiftServerless::Workgroup Tag": { "Key": "The key to use in the tag.", "Value": "The value of the tag."
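Several of the `AWS::RDS::DBCluster` additions above are interdependent: `DatabaseInsightsMode: advanced` requires `PerformanceInsightsEnabled: true` and a `PerformanceInsightsRetentionPeriod` of 465, and `SecondsUntilAutoPause` matters only when the cluster can scale down to the auto-pause minimum of 0 ACUs. A pared-down, illustrative fragment that satisfies those documented constraints (the engine value is an assumption, and a deployable cluster needs further properties):

    "AuroraCluster": {
      "Type": "AWS::RDS::DBCluster",
      "Properties": {
        "Engine": "aurora-postgresql",
        "DatabaseInsightsMode": "advanced",
        "PerformanceInsightsEnabled": true,
        "PerformanceInsightsRetentionPeriod": 465,
        "ServerlessV2ScalingConfiguration": {
          "MinCapacity": 0,
          "MaxCapacity": 8,
          "SecondsUntilAutoPause": 3600
        }
      }
    }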
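Similarly, the new Redshift Serverless `PricePerformanceTarget` property is the `PerformanceTarget` type above attached to a workgroup. A hypothetical fragment (the `ENABLED` status string is an assumption about the enum form; `Level: 50` corresponds to BALANCED per the description):

    "AnalyticsWorkgroup": {
      "Type": "AWS::RedshiftServerless::Workgroup",
      "Properties": {
        "WorkgroupName": "analytics-wg",
        "NamespaceName": "analytics-ns",
        "PricePerformanceTarget": {
          "Status": "ENABLED",
          "Level": 50
        }
      }
    }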
@@ -40999,6 +41815,7 @@ "EnhancedVpcRouting": "The value that specifies whether to enable enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC.", "MaxCapacity": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", "NamespaceName": "The namespace the workgroup is associated with.", + "PricePerformanceTarget": "An object that represents the price performance target settings for the workgroup.", "PubliclyAccessible": "A value that specifies whether the workgroup can be accessible from a public network.", "SecurityGroupIds": "An array of security group IDs to associate with the workgroup.", "Status": "The status of the workgroup.", @@ -41160,7 +41977,7 @@ }, "AWS::ResilienceHub::App PermissionModel": { "CrossAccountRoleArns": "Defines a list of role Amazon Resource Names (ARNs) to be used in other accounts. These ARNs are used for querying purposes while importing resources and assessing your application.\n\n> - These ARNs are required only when your resources are in other accounts and you have different role name in these accounts. Else, the invoker role name will be used in the other accounts.\n> - These roles must have a trust policy with `iam:AssumeRole` permission to the invoker role in the primary account.", - "InvokerRoleName": "Existing AWS IAM role name in the primary AWS account that will be assumed by AWS Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment.\n\n> - You must have `iam:passRole` permission for this role while creating or updating the application.\n> - Currently, `invokerRoleName` accepts only `[A-Za-z0-9_+=,.@-]` characters.", + "InvokerRoleName": "Existing AWS IAM role name in the primary AWS account that will be assumed by the AWS Resilience Hub Service Principal to obtain read-only access to your application resources while running an assessment.\n\nIf your IAM role includes a path, you must include the path in the `invokerRoleName` parameter. For example, if your IAM role's ARN is `arn:aws:iam:123456789012:role/my-path/role-name` , you should pass `my-path/role-name` .\n\n> - You must have `iam:passRole` permission for this role while creating or updating the application.\n> - Currently, `invokerRoleName` accepts only `[A-Za-z0-9_+=,.@-]` characters.", "Type": "Defines how AWS Resilience Hub scans your resources. It can scan for the resources by using a pre-existing role in your AWS account, or by using the credentials of the current IAM user." }, "AWS::ResilienceHub::App PhysicalResourceId": { @@ -41848,7 +42665,7 @@ "AccessControl": "> This is a legacy property, and it is not recommended for most use cases. A majority of modern use cases in Amazon S3 no longer require the use of ACLs, and we recommend that you keep ACLs disabled. For more information, see [Controlling object ownership](https://docs.aws.amazon.com//AmazonS3/latest/userguide/about-object-ownership.html) in the *Amazon S3 User Guide* . \n\nA canned access control list (ACL) that grants predefined permissions to the bucket. For more information about canned ACLs, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) in the *Amazon S3 User Guide* .\n\nS3 buckets are created with ACLs disabled by default.
Therefore, unless you explicitly set the [AWS::S3::OwnershipControls](https://docs.aws.amazon.com//AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-ownershipcontrols.html) property to enable ACLs, your resource will fail to deploy with any value other than Private. Use cases requiring ACLs are uncommon.\n\nThe majority of access control configurations can be successfully and more easily achieved with bucket policies. For more information, see [AWS::S3::BucketPolicy](https://docs.aws.amazon.com//AWSCloudFormation/latest/UserGuide/aws-properties-s3-policy.html) . For examples of common policy configurations, including S3 Server Access Logs buckets and more, see [Bucket policy examples](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) in the *Amazon S3 User Guide* .", "AnalyticsConfigurations": "Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.", "BucketEncryption": "Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3), AWS KMS-managed keys (SSE-KMS), or dual-layer server-side encryption with KMS-managed keys (DSSE-KMS). For information about the Amazon S3 default encryption feature, see [Amazon S3 Default Encryption for S3 Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the *Amazon S3 User Guide* .", - "BucketName": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. The bucket name must contain only lowercase letters, numbers, periods (.), and dashes (-) and must follow [Amazon S3 bucket restrictions and limitations](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) . For more information, see [Rules for naming Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", + "BucketName": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. The bucket name must contain only lowercase letters, numbers, periods (.), and dashes (-) and must follow [Amazon S3 bucket restrictions and limitations](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) . For more information, see [Rules for naming Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "CorsConfiguration": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see [Enabling Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the *Amazon S3 User Guide* .", "IntelligentTieringConfigurations": "Defines how Amazon S3 handles Intelligent-Tiering storage.", "InventoryConfigurations": "Specifies the inventory configuration for an Amazon S3 bucket. 
For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference* .", @@ -42307,10 +43124,10 @@ }, "AWS::S3Express::DirectoryBucket": { "BucketEncryption": "Specifies default encryption for a bucket using server-side encryption with Amazon S3 managed keys (SSE-S3) or AWS KMS keys (SSE-KMS). For information about default encryption for directory buckets, see [Setting and monitoring default encryption for directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html) in the *Amazon S3 User Guide* .", - "BucketName": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", - "DataRedundancy": "The number of Availability Zone that's used for redundancy for the bucket.", + "BucketName": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Zone (Availability Zone or Local Zone). The bucket name must also follow the format `*bucket_base_name* -- *zone_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", + "DataRedundancy": "The number of Zones (Availability Zones or Local Zones) used for redundancy for the bucket.", "LifecycleConfiguration": "Container for lifecycle rules. You can add as many as 1000 rules.\n\nFor more information, see [Creating and managing a lifecycle configuration for directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-lifecycle.html) in the *Amazon S3 User Guide* .", + "LocationName": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the Zone ID of the Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An example AZ ID value is `usw2-az1` ."
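The directory bucket naming rule above is easier to read from an example. A minimal sketch for a single-Availability-Zone bucket in `usw2-az1` (the base name is a placeholder, and `SingleAvailabilityZone` is an assumed `DataRedundancy` value for the AZ case):

    "ExpressBucket": {
      "Type": "AWS::S3Express::DirectoryBucket",
      "Properties": {
        "BucketName": "app-data--usw2-az1--x-s3",
        "LocationName": "usw2-az1",
        "DataRedundancy": "SingleAvailabilityZone"
      }
    }

Note that `BucketName` embeds the same Zone ID that `LocationName` names, following the `*bucket_base_name* -- *zone_id* --x-s3` format.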
}, "AWS::S3Express::DirectoryBucket AbortIncompleteMultipartUpload": { "DaysAfterInitiation": "Specifies the number of days after which Amazon S3 aborts an incomplete multipart upload." @@ -42438,6 +43255,19 @@ "AWS::S3Outposts::Endpoint NetworkInterface": { "NetworkInterfaceId": "The ID for the network interface." }, + "AWS::S3Tables::TableBucket": { + "TableBucketName": "The name for the table bucket.", + "UnreferencedFileRemoval": "The unreferenced file removal settings for your table bucket. Unreferenced file removal identifies and deletes all objects that are not referenced by any table snapshots. For more information, see the [*Amazon S3 User Guide*](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-table-buckets-maintenance.html) ." + }, + "AWS::S3Tables::TableBucket UnreferencedFileRemoval": { + "NoncurrentDays": "The number of days an object can be noncurrent before Amazon S3 deletes it.", + "Status": "The status of the unreferenced file removal configuration for your table bucket.", + "UnreferencedDays": "The number of days an object must be unreferenced by your table before Amazon S3 marks the object as noncurrent." + }, + "AWS::S3Tables::TableBucketPolicy": { + "ResourcePolicy": "The bucket policy JSON for the table bucket.", + "TableBucketARN": "The Amazon Resource Name (ARN) of the table bucket." + }, "AWS::SDB::Domain": { "Description": "Information about the SimpleDB domain." }, @@ -42637,6 +43467,12 @@ "MailboxArn": "The Amazon Resource Name (ARN) of a WorkMail organization to deliver the email to.", "RoleArn": "The Amazon Resource Name (ARN) of an IAM role to use to execute this action. The role must have access to the workmail:DeliverToMailbox API." }, + "AWS::SES::MailManagerRuleSet DeliverToQBusinessAction": { + "ActionFailurePolicy": "A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified application has been deleted or the role lacks necessary permissions to call the qbusiness:BatchPutDocument API.", + "ApplicationId": "The unique identifier of the Amazon Q Business application instance where the email content will be delivered.", + "IndexId": "The identifier of the knowledge base index within the Amazon Q Business application where the email content will be stored and indexed.", + "RoleArn": "The Amazon Resource Name (ARN) of the IAM Role to use while delivering to Amazon Q Business. This role must have access to the qbusiness:BatchPutDocument API for the given application and index." + }, "AWS::SES::MailManagerRuleSet RelayAction": { "ActionFailurePolicy": "A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified relay has been deleted.", "MailFrom": "This action specifies whether to preserve or replace original mail from address while relaying received emails to a destination server.", @@ -42655,6 +43491,7 @@ "AddHeader": "This action adds a header. This can be used to add arbitrary email headers.", "Archive": "This action archives the email. 
This can be used to deliver an email to an archive.", "DeliverToMailbox": "This action delivers an email to a WorkMail mailbox.", + "DeliverToQBusiness": "This action delivers an email to an Amazon Q Business application for ingestion into its knowledge base.", "Drop": "This action terminates the evaluation of rules in the rule set.", "Relay": "This action relays the email to another SMTP server.", "ReplaceRecipient": "The action replaces certain or all recipients with a different set of recipients.", @@ -42804,6 +43641,7 @@ "AWS::SES::ReceiptRule Action": { "AddHeaderAction": "Adds a header to the received email.", "BounceAction": "Rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).", + "ConnectAction": "", "LambdaAction": "Calls an AWS Lambda function, and optionally, publishes a notification to Amazon SNS.", "S3Action": "Saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon SNS.", "SNSAction": "Publishes the email content within a notification to Amazon SNS.", @@ -42821,6 +43659,10 @@ "StatusCode": "The SMTP enhanced status code, as defined by [RFC 3463](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc3463) .", "TopicArn": "The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the bounce action is taken. You can find the ARN of a topic by using the [ListTopics](https://docs.aws.amazon.com/sns/latest/api/API_ListTopics.html) operation in Amazon SNS.\n\nFor more information about Amazon SNS topics, see the [Amazon SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html) ." }, + "AWS::SES::ReceiptRule ConnectAction": { + "IAMRoleARN": "The Amazon Resource Name (ARN) of the IAM role to be used by Amazon Simple Email Service while starting email contacts to the Amazon Connect instance. This role should have permission to invoke `connect:StartEmailContact` for the given Amazon Connect instance.", + "InstanceARN": "The Amazon Resource Name (ARN) for the Amazon Connect instance that Amazon SES integrates with for starting email contacts.\n\nFor more information about Amazon Connect instances, see the [Amazon Connect Administrator Guide](https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-instances.html)" + }, "AWS::SES::ReceiptRule LambdaAction": { "FunctionArn": "The Amazon Resource Name (ARN) of the AWS Lambda function. An example of an AWS Lambda function ARN is `arn:aws:lambda:us-west-2:account-id:function:MyFunction` . For more information about AWS Lambda, see the [AWS Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html) .", "InvocationType": "The invocation type of the AWS Lambda function. An invocation type of `RequestResponse` means that the execution of the function immediately results in a response, and a value of `Event` means that the function is invoked asynchronously. The default value is `Event` . For information about AWS Lambda invocation types, see the [AWS Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html) .\n\n> There is a 30-second timeout on `RequestResponse` invocations. You should use `Event` invocation in most cases. Use `RequestResponse` only to make a mail flow decision, such as whether to stop the receipt rule or the receipt rule set.", @@ -42879,7 +43721,7 @@ "DeliveryPolicy": "The delivery policy JSON assigned to the subscription. 
Enables the subscriber to define the message delivery retry strategy in the case of an HTTP/S endpoint subscribed to the topic. For more information, see `[GetSubscriptionAttributes](https://docs.aws.amazon.com/sns/latest/api/API_GetSubscriptionAttributes.html)` in the *Amazon SNS API Reference* and [Message delivery retries](https://docs.aws.amazon.com/sns/latest/dg/sns-message-delivery-retries.html) in the *Amazon SNS Developer Guide* .", "Endpoint": "The subscription's endpoint. The endpoint value depends on the protocol that you specify. For more information, see the `Endpoint` parameter of the `[Subscribe](https://docs.aws.amazon.com/sns/latest/api/API_Subscribe.html)` action in the *Amazon SNS API Reference* .", "FilterPolicy": "The filter policy JSON assigned to the subscription. Enables the subscriber to filter out unwanted messages. For more information, see `[GetSubscriptionAttributes](https://docs.aws.amazon.com/sns/latest/api/API_GetSubscriptionAttributes.html)` in the *Amazon SNS API Reference* and [Message filtering](https://docs.aws.amazon.com/sns/latest/dg/sns-message-filtering.html) in the *Amazon SNS Developer Guide* .", - "FilterPolicyScope": "This attribute lets you choose the filtering scope by using one of the following string value types:\n\n- `MessageAttributes` (default) - The filter is applied on the message attributes.\n- `MessageBody` - The filter is applied on the message body.", + "FilterPolicyScope": "This attribute lets you choose the filtering scope by using one of the following string value types:\n\n- `MessageAttributes` (default) - The filter is applied on the message attributes.\n- `MessageBody` - The filter is applied on the message body.\n\n> `Null` is not a valid value for `FilterPolicyScope` . To delete a filter policy, delete the `FilterPolicy` property but keep the `FilterPolicyScope` property as is.", "Protocol": "The subscription's protocol. For more information, see the `Protocol` parameter of the `[Subscribe](https://docs.aws.amazon.com/sns/latest/api/API_Subscribe.html)` action in the *Amazon SNS API Reference* .", "RawMessageDelivery": "When set to `true` , enables raw message delivery. Raw messages don't contain any JSON formatting and can be sent to Amazon SQS and HTTP/S endpoints. For more information, see `[GetSubscriptionAttributes](https://docs.aws.amazon.com/sns/latest/api/API_GetSubscriptionAttributes.html)` in the *Amazon SNS API Reference* .", "RedrivePolicy": "When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.\n\nFor more information about the redrive policy and dead-letter queues, see [Amazon SQS dead-letter queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) in the *Amazon SQS Developer Guide* .", @@ -43462,7 +44304,7 @@ "AWS::SageMaker::App ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` .
KernelGateway apps also support all other values for available instance types.", "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", - "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", + "SageMakerImageArn": "The ARN of the SageMaker AI image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, "AWS::SageMaker::App Tag": { @@ -43473,7 +44315,7 @@ "AppImageConfigName": "The name of the AppImageConfig. Must be unique to your account.", "CodeEditorAppImageConfig": "The configuration for the file system and the runtime, such as the environment variables and entry point.", "JupyterLabAppImageConfig": "The configuration for the file system and the runtime, such as the environment variables and entry point.", - "KernelGatewayImageConfig": "The configuration for the file system and kernels in the SageMaker image.", + "KernelGatewayImageConfig": "The configuration for the file system and kernels in the SageMaker AI image.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::SageMaker::AppImageConfig CodeEditorAppImageConfig": { @@ -43497,7 +44339,7 @@ "ContainerConfig": "The configuration used to run the application image container." }, "AWS::SageMaker::AppImageConfig KernelGatewayImageConfig": { - "FileSystemConfig": "The Amazon Elastic File System storage configuration for a SageMaker image.", + "FileSystemConfig": "The Amazon Elastic File System storage configuration for a SageMaker AI image.", "KernelSpecs": "The specification of the Jupyter kernels in the image." }, "AWS::SageMaker::AppImageConfig KernelSpec": { @@ -43635,7 +44477,7 @@ "S3Output": "The Amazon S3 storage location where the results of a monitoring job are saved." }, "AWS::SageMaker::DataQualityJobDefinition MonitoringOutputConfig": { - "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "MonitoringOutputs": "Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded." }, "AWS::SageMaker::DataQualityJobDefinition MonitoringResources": { @@ -43695,7 +44537,7 @@ "Value": "The tag value." }, "AWS::SageMaker::Domain": { - "AppNetworkAccessType": "Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`", + "AppNetworkAccessType": "Specifies the VPC used for non-EFS traffic. 
The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`", "AppSecurityGroupManagement": "The entity that creates and manages the required security groups for inter-app communication in `VpcOnly` mode. Required when `CreateDomain.AppNetworkAccessType` is `VpcOnly` and `DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn` is provided. If setting up the domain for use with RStudio, this value must be set to `Service` .\n\n*Allowed Values* : `Service` | `Customer`", "AuthMode": "The mode of authentication that members use to access the Domain.\n\n*Valid Values* : `SSO | IAM`", "DefaultSpaceSettings": "The default settings for shared spaces that users create in the domain.\n\nSageMaker applies these settings only to shared spaces. It doesn't apply them to private spaces.", @@ -43721,7 +44563,8 @@ "RepositoryUrl": "The URL of the Git repository." }, "AWS::SageMaker::Domain CustomFileSystemConfig": { - "EFSFileSystemConfig": "The settings for a custom Amazon EFS file system." + "EFSFileSystemConfig": "The settings for a custom Amazon EFS file system.", + "FSxLustreFileSystemConfig": "The settings for a custom Amazon FSx for Lustre file system." }, "AWS::SageMaker::Domain CustomImage": { "AppImageConfigName": "The name of the AppImageConfig.", @@ -43737,7 +44580,7 @@ "MaximumEbsVolumeSizeInGb": "The maximum size of the EBS storage volume for a space." }, "AWS::SageMaker::Domain DefaultSpaceSettings": { - "CustomFileSystemConfigs": "The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker Studio.", + "CustomFileSystemConfigs": "The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker AI Studio.", "CustomPosixUserConfig": "", "ExecutionRole": "The ARN of the execution role for the space.", "JupyterLabAppSettings": "", @@ -43755,12 +44598,16 @@ }, "AWS::SageMaker::Domain DomainSettings": { "DockerSettings": "A collection of settings that configure the domain's Docker interaction.", - "ExecutionRoleIdentityConfig": "The configuration for attaching a SageMaker user profile name to the execution role as a [sts:SourceIdentity key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) .", + "ExecutionRoleIdentityConfig": "The configuration for attaching a SageMaker AI user profile name to the execution role as a [sts:SourceIdentity key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) .", "RStudioServerProDomainSettings": "A collection of settings that configure the `RStudioServerPro` Domain-level app.", "SecurityGroupIds": "The security groups for the Amazon Virtual Private Cloud that the `Domain` uses for communication between Domain-level apps and user apps." }, "AWS::SageMaker::Domain EFSFileSystemConfig": { "FileSystemId": "The ID of your Amazon EFS file system.", + "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker AI Studio. Permitted users can access only this directory and below." 
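With the FSx for Lustre addition noted above, `CustomFileSystemConfigs` becomes a list whose entries each wrap one file system config type. A hypothetical fragment of a domain's `DefaultUserSettings` (the role ARN, file system IDs, and paths are placeholders):

    "DefaultUserSettings": {
      "ExecutionRole": "arn:aws:iam::111122223333:role/SageMakerExecutionRole",
      "CustomFileSystemConfigs": [
        { "EFSFileSystemConfig": { "FileSystemId": "fs-0123456789abcdef0", "FileSystemPath": "/shared" } },
        { "FSxLustreFileSystemConfig": { "FileSystemId": "fs-0fedcba9876543210", "FileSystemPath": "/lustre" } }
      ]
    }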
+ }, + "AWS::SageMaker::Domain FSxLustreFileSystemConfig": { + "FileSystemId": "The globally unique, 17-digit, ID of the file system, assigned by Amazon FSx for Lustre.", "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below." }, "AWS::SageMaker::Domain IdleSettings": { @@ -43781,12 +44628,12 @@ "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain KernelGatewayAppSettings": { - "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "CustomImages": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.", + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain RSessionAppSettings": { - "CustomImages": "A list of custom SageMaker images that are configured to run as a RSession app.", + "CustomImages": "A list of custom SageMaker AI images that are configured to run as a RSession app.", "DefaultResourceSpec": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on." }, "AWS::SageMaker::Domain RStudioServerProAppSettings": { @@ -43802,7 +44649,7 @@ "AWS::SageMaker::Domain ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", - "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", + "SageMakerImageArn": "The ARN of the SageMaker AI image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, "AWS::SageMaker::Domain SharingSettings": { @@ -43820,7 +44667,7 @@ }, "AWS::SageMaker::Domain UserSettings": { "CodeEditorAppSettings": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. 
SageMaker doesn't apply these settings to shared spaces.", - "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "DefaultLandingUri": "The default experience that the user is directed to when accessing the domain. The supported values are:\n\n- `studio::` : Indicates that Studio is the default experience. This value can only be passed if `StudioWebPortal` is set to `ENABLED` .\n- `app:JupyterServer:` : Indicates that Studio Classic is the default experience.", "ExecutionRole": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.", @@ -43829,8 +44676,8 @@ "KernelGatewayAppSettings": "The kernel gateway app settings.", "RSessionAppSettings": "A collection of settings that configure the `RSessionGateway` app.", "RStudioServerProAppSettings": "A collection of settings that configure user interaction with the `RStudioServerPro` app.", - "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", - "SharingSettings": "Specifies options for sharing Amazon SageMaker Studio notebooks.", + "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. 
SageMaker doesn't apply these settings to shared spaces.", + "SharingSettings": "Specifies options for sharing Amazon SageMaker AI Studio notebooks.", "SpaceStorageSettings": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "StudioWebPortal": "Whether the user can access Studio. If this value is set to `DISABLED` , the user cannot access Studio, even if that is the default experience for the domain.", "StudioWebPortalSettings": "Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level." @@ -44097,7 +44944,7 @@ "BaseInferenceComponentName": "The name of an existing inference component that is to contain the inference component that you're creating with your request.\n\nSpecify this parameter only if your request is meant to create an adapter inference component. An adapter inference component contains the path to an adapter model. The purpose of the adapter model is to tailor the inference output of a base foundation model, which is hosted by the base inference component. The adapter inference component uses the compute resources that you assigned to the base inference component.\n\nWhen you create an adapter inference component, use the `Container` parameter to specify the location of the adapter artifacts. In the parameter value, use the `ArtifactUrl` parameter of the `InferenceComponentContainerSpecification` data type.\n\nBefore you can create an adapter inference component, you must have an existing inference component that contains the foundation model that you want to adapt.", "ComputeResourceRequirements": "The compute resources allocated to run the model, plus any adapter models, that you assign to the inference component.\n\nOmit this parameter if your request is meant to create an adapter inference component. An adapter inference component is loaded by a base inference component, and it uses the compute resources of the base inference component.", "Container": "Defines a container that provides the runtime environment for a model that you deploy with an inference component.", - "ModelName": "The name of an existing SageMaker model object in your account that you want to deploy with the inference component.", + "ModelName": "The name of an existing SageMaker AI model object in your account that you want to deploy with the inference component.", "StartupParameters": "Settings that take effect while the model container starts up." }, "AWS::SageMaker::InferenceComponent InferenceComponentStartupParameters": { @@ -44124,8 +44971,8 @@ "Type": "The type of the inference experiment." }, "AWS::SageMaker::InferenceExperiment CaptureContentTypeHeader": { - "CsvContentTypes": "The list of all content type headers that Amazon SageMaker will treat as CSV and capture accordingly.", - "JsonContentTypes": "The list of all content type headers that SageMaker will treat as JSON and capture accordingly." + "CsvContentTypes": "The list of all content type headers that Amazon SageMaker AI will treat as CSV and capture accordingly.", + "JsonContentTypes": "The list of all content type headers that SageMaker AI will treat as JSON and capture accordingly." }, "AWS::SageMaker::InferenceExperiment DataStorageConfig": { "ContentType": "Configuration specifying how to treat different headers. 
If no headers are specified, SageMaker will by default base64 encode when capturing the data.",
@@ -44318,7 +45165,7 @@
 "S3Output": "The Amazon S3 storage location where the results of a monitoring job are saved."
 },
 "AWS::SageMaker::ModelBiasJobDefinition MonitoringOutputConfig": {
- "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
+ "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
 "MonitoringOutputs": "Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded."
 },
 "AWS::SageMaker::ModelBiasJobDefinition MonitoringResources": {
@@ -44417,7 +45264,7 @@
 "ModelArtifact": "The location of the model artifact.",
 "ModelCreator": "The creator of the model.",
 "ModelDescription": "A description of the model.",
- "ModelId": "The SageMaker Model ARN or non- SageMaker Model ID.",
+ "ModelId": "The SageMaker AI Model ARN or non-SageMaker AI Model ID.",
 "ModelName": "The name of the model.",
 "ModelOwner": "The owner of the model.",
 "ModelVersion": "The version of the model.",
@@ -44470,7 +45317,7 @@
 "TrainingObservations": "Any observations about training."
 },
 "AWS::SageMaker::ModelCard TrainingEnvironment": {
- "ContainerImage": "SageMaker inference image URI."
+ "ContainerImage": "SageMaker AI inference image URI."
 },
 "AWS::SageMaker::ModelCard TrainingHyperParameter": {
 "Name": "The name of the hyper parameter.",
@@ -44478,17 +45325,17 @@
 },
 "AWS::SageMaker::ModelCard TrainingJobDetails": {
 "HyperParameters": "The hyper parameters used in the training job.",
- "TrainingArn": "The SageMaker training job Amazon Resource Name (ARN)",
+ "TrainingArn": "The SageMaker AI training job Amazon Resource Name (ARN).",
 "TrainingDatasets": "The location of the datasets used to train the model.",
- "TrainingEnvironment": "The SageMaker training job image URI.",
- "TrainingMetrics": "The SageMaker training job results.",
+ "TrainingEnvironment": "The SageMaker AI training job image URI.",
+ "TrainingMetrics": "The SageMaker AI training job results.",
 "UserProvidedHyperParameters": "Additional hyper parameters that you've specified when training the model.",
 "UserProvidedTrainingMetrics": "Custom training job results."
 },
 "AWS::SageMaker::ModelCard TrainingMetric": {
- "Name": "The name of the result from the SageMaker training job.",
+ "Name": "The name of the result from the SageMaker AI training job.",
 "Notes": "Any additional notes describing the result of the training job.",
- "Value": "The value of a result from the SageMaker training job."
+ "Value": "The value of a result from the SageMaker AI training job."
 },
 "AWS::SageMaker::ModelCard UserContext": {
 "DomainId": "The domain associated with the user.",
@@ -44564,7 +45411,7 @@
 "S3Output": "The Amazon S3 storage location where the results of a monitoring job are saved."
 },
 "AWS::SageMaker::ModelExplainabilityJobDefinition MonitoringOutputConfig": {
- "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
+ "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
 "MonitoringOutputs": "Monitoring outputs for monitoring jobs.
This is where the output of the periodic monitoring jobs is uploaded."
 },
 "AWS::SageMaker::ModelExplainabilityJobDefinition MonitoringResources": {
@@ -44882,7 +45729,7 @@
 "S3Output": "The Amazon S3 storage location where the results of a monitoring job are saved."
 },
 "AWS::SageMaker::ModelQualityJobDefinition MonitoringOutputConfig": {
- "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
+ "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
 "MonitoringOutputs": "Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded."
 },
 "AWS::SageMaker::ModelQualityJobDefinition MonitoringResources": {
@@ -44982,18 +45829,18 @@
 "BaselineConfig": "Baseline configuration used to validate that the data conforms to the specified constraints and statistics.",
 "Environment": "Sets the environment variables in the Docker container.",
 "MonitoringAppSpecification": "Configures the monitoring job to run a specified Docker container image.",
- "MonitoringInputs": "The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker Endpoint.",
+ "MonitoringInputs": "The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker AI Endpoint.",
 "MonitoringOutputConfig": "The array of outputs from the monitoring job to be uploaded to Amazon S3.",
 "MonitoringResources": "Identifies the resources, ML compute instances, and ML storage volumes to deploy for a monitoring job. In distributed processing, you specify more than one instance.",
 "NetworkConfig": "Specifies networking options for a monitoring job.",
- "RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.",
+ "RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.",
 "StoppingCondition": "Specifies a time limit for how long the monitoring job is allowed to run."
 },
 "AWS::SageMaker::MonitoringSchedule MonitoringOutput": {
 "S3Output": "The Amazon S3 storage location where the results of a monitoring job are saved."
 },
 "AWS::SageMaker::MonitoringSchedule MonitoringOutputConfig": {
- "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
+ "KmsKeyId": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.",
 "MonitoringOutputs": "Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded."
 },
 "AWS::SageMaker::MonitoringSchedule MonitoringResources": {
@@ -45018,7 +45865,7 @@
 "AWS::SageMaker::MonitoringSchedule ScheduleConfig": {
 "DataAnalysisEndTime": "Sets the end time for a monitoring job window. Express this time as an offset to the times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the `ScheduleExpression` parameter. Specify this offset in ISO 8601 duration format.
For example, if you want to end the window one hour before the start of each monitoring job, you would specify: `\"-PT1H\"` .\n\nThe end time that you specify must not follow the start time that you specify by more than 24 hours. You specify the start time with the `DataAnalysisStartTime` parameter.\n\nIf you set `ScheduleExpression` to `NOW` , this parameter is required.", "DataAnalysisStartTime": "Sets the start time for a monitoring job window. Express this time as an offset to the times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the `ScheduleExpression` parameter. Specify this offset in ISO 8601 duration format. For example, if you want to monitor the five hours of data in your dataset that precede the start of each monitoring job, you would specify: `\"-PT5H\"` .\n\nThe start time that you specify must not precede the end time that you specify by more than 24 hours. You specify the end time with the `DataAnalysisEndTime` parameter.\n\nIf you set `ScheduleExpression` to `NOW` , this parameter is required.", - "ScheduleExpression": "A cron expression that describes details about the monitoring schedule.\n\nThe supported cron expressions are:\n\n- If you want to set the job to start every hour, use the following:\n\n`Hourly: cron(0 * ? * * *)`\n- If you want to start the job daily:\n\n`cron(0 [00-23] ? * * *)`\n- If you want to run the job one time, immediately, use the following keyword:\n\n`NOW`\n\nFor example, the following are valid cron expressions:\n\n- Daily at noon UTC: `cron(0 12 ? * * *)`\n- Daily at midnight UTC: `cron(0 0 ? * * *)`\n\nTo support running every 6, 12 hours, the following are also supported:\n\n`cron(0 [00-23]/[01-24] ? * * *)`\n\nFor example, the following are valid cron expressions:\n\n- Every 12 hours, starting at 5pm UTC: `cron(0 17/12 ? * * *)`\n- Every two hours starting at midnight: `cron(0 0/2 ? * * *)`\n\n> - Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution.\n> - We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker will pick a time for running every day. \n\nYou can also specify the keyword `NOW` to run the monitoring job immediately, one time, without recurring." + "ScheduleExpression": "A cron expression that describes details about the monitoring schedule.\n\nThe supported cron expressions are:\n\n- If you want to set the job to start every hour, use the following:\n\n`Hourly: cron(0 * ? * * *)`\n- If you want to start the job daily:\n\n`cron(0 [00-23] ? * * *)`\n- If you want to run the job one time, immediately, use the following keyword:\n\n`NOW`\n\nFor example, the following are valid cron expressions:\n\n- Daily at noon UTC: `cron(0 12 ? * * *)`\n- Daily at midnight UTC: `cron(0 0 ? * * *)`\n\nTo support running every 6, 12 hours, the following are also supported:\n\n`cron(0 [00-23]/[01-24] ? * * *)`\n\nFor example, the following are valid cron expressions:\n\n- Every 12 hours, starting at 5pm UTC: `cron(0 17/12 ? * * *)`\n- Every two hours starting at midnight: `cron(0 0/2 ? * * *)`\n\n> - Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution.\n> - We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker AI will pick a time for running every day. 
\n\nYou can also specify the keyword `NOW` to run the monitoring job immediately, one time, without recurring." }, "AWS::SageMaker::MonitoringSchedule StatisticsResource": { "S3Uri": "The S3 URI for the statistics resource." @@ -45036,16 +45883,16 @@ }, "AWS::SageMaker::NotebookInstance": { "AcceleratorTypes": "A list of Amazon Elastic Inference (EI) instance types to associate with the notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see [Using Elastic Inference in Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html) .\n\n*Valid Values:* `ml.eia1.medium | ml.eia1.large | ml.eia1.xlarge | ml.eia2.medium | ml.eia2.large | ml.eia2.xlarge` .", - "AdditionalCodeRepositories": "An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see [Associating Git Repositories with SageMaker Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", - "DefaultCodeRepository": "The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see [Associating Git Repositories with SageMaker Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", - "DirectInternetAccess": "Sets whether SageMaker provides internet access to the notebook instance. If you set this to `Disabled` this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC.\n\nFor more information, see [Notebook Instances Are Internet-Enabled by Default](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access) . You can set the value of this parameter to `Disabled` only if you set a value for the `SubnetId` parameter.", + "AdditionalCodeRepositories": "An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see [Associating Git Repositories with SageMaker AI Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", + "DefaultCodeRepository": "The Git repository associated with the notebook instance as its default code repository. 
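As a sketch of the ScheduleConfig forms documented above, the fragment below shows the schedule portion of an AWS::SageMaker::MonitoringSchedule in a CloudFormation template; the monitoring job definition itself is omitted, and the commented alternative shows the one-time NOW form with the ISO 8601 offsets from the examples above:

    MonitoringScheduleConfig:
      ScheduleConfig:
        # Recurring: run daily at midnight UTC. Other documented forms include
        # cron(0 17/12 ? * * *), i.e. every 12 hours starting at 5 PM UTC.
        ScheduleExpression: cron(0 0 ? * * *)
        # For a one-time immediate run instead, NOW requires both offsets:
        #   ScheduleExpression: NOW
        #   DataAnalysisStartTime: "-PT5H"   # window opens 5 hours before the job
        #   DataAnalysisEndTime: "-PT1H"     # window closes 1 hour before the job
      # MonitoringJobDefinition: ... (required in a real schedule; omitted here)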
This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see [Associating Git Repositories with SageMaker AI Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .",
+ "DirectInternetAccess": "Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to `Disabled` , this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC.\n\nFor more information, see [Notebook Instances Are Internet-Enabled by Default](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access) . You can set the value of this parameter to `Disabled` only if you set a value for the `SubnetId` parameter.",
 "InstanceMetadataServiceConfiguration": "Information on the IMDS configuration of the notebook instance.",
 "InstanceType": "The type of ML compute instance to launch for the notebook instance.\n\n> Expect some interruption of service if this parameter is changed as CloudFormation stops a notebook instance and starts it up again to update it.",
- "KmsKeyId": "The Amazon Resource Name (ARN) of a AWS Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see [Enabling and Disabling Keys](https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the *AWS Key Management Service Developer Guide* .",
+ "KmsKeyId": "The Amazon Resource Name (ARN) of an AWS Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see [Enabling and Disabling Keys](https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the *AWS Key Management Service Developer Guide* .",
 "LifecycleConfigName": "The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see [Customize a Notebook Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html) in the *Amazon SageMaker Developer Guide* .",
 "NotebookInstanceName": "The name of the new notebook instance.",
 "PlatformIdentifier": "The platform identifier of the notebook instance runtime environment.",
- "RoleArn": "When you send any requests to AWS resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see [SageMaker Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) .\n\n> To be able to pass this role to SageMaker, the caller of this API must have the `iam:PassRole` permission.",
+ "RoleArn": "When you send any requests to AWS resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf.
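Pulling the NotebookInstance properties above together, here is a minimal VPC-only sketch; the ARNs, subnet, security group, and repository URL are hypothetical placeholders:

    Resources:
      ExampleNotebook:
        Type: AWS::SageMaker::NotebookInstance
        Properties:
          InstanceType: ml.t3.medium
          RoleArn: arn:aws:iam::111122223333:role/ExampleNotebookRole   # hypothetical; the caller needs iam:PassRole
          SubnetId: subnet-0123456789abcdef0                            # hypothetical subnet
          SecurityGroupIds:
            - sg-0123456789abcdef0                                      # hypothetical; same VPC as the subnet
          DirectInternetAccess: Disabled      # valid only because SubnetId is set
          KmsKeyId: arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab  # hypothetical; key must be enabled
          DefaultCodeRepository: https://git-codecommit.us-east-1.amazonaws.com/v1/repos/example-repo  # hypothetical
          RootAccess: Disabled                # lifecycle configs still run as root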
You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see [SageMaker AI Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) .\n\n> To be able to pass this role to SageMaker AI, the caller of this API must have the `iam:PassRole` permission.",
 "RootAccess": "Whether root access is enabled or disabled for users of the notebook instance. The default value is `Enabled` .\n\n> Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users.",
 "SecurityGroupIds": "The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.",
 "SubnetId": "The ID of the subnet in a VPC to which you would like to have connectivity from your ML compute instance.",
@@ -45067,6 +45914,28 @@
 "AWS::SageMaker::NotebookInstanceLifecycleConfig NotebookInstanceLifecycleHook": {
 "Content": "A base64-encoded string that contains a shell script for a notebook instance lifecycle configuration."
 },
+ "AWS::SageMaker::PartnerApp": {
+ "ApplicationConfig": "",
+ "AuthType": "",
+ "EnableIamSessionBasedIdentity": "",
+ "ExecutionRoleArn": "",
+ "MaintenanceConfig": "",
+ "Name": "The name of the SageMaker Partner AI App.",
+ "Tags": "",
+ "Tier": "",
+ "Type": "The type of SageMaker Partner AI App to create. Must be one of the following: `lakera-guard` , `comet` , `deepchecks-llm-evaluation` , or `fiddler` ."
+ },
+ "AWS::SageMaker::PartnerApp PartnerAppConfig": {
+ "AdminUsers": "The list of users that are given admin access to the SageMaker Partner AI App.",
+ "Arguments": "This is a map of required inputs for a SageMaker Partner AI App. Based on the application type, the map is populated with a key and value pair that is specific to the user and application."
+ },
+ "AWS::SageMaker::PartnerApp PartnerAppMaintenanceConfig": {
+ "MaintenanceWindowStart": "The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time at which weekly maintenance updates are scheduled. This value must take the following format: `3-letter-day:24-h-hour:minute` . For example: `TUE:03:30` ."
+ },
+ "AWS::SageMaker::PartnerApp Tag": {
+ "Key": "The tag key. Tag keys must be unique per resource.",
+ "Value": "The tag value."
+ },
 "AWS::SageMaker::Pipeline": {
 "ParallelismConfiguration": "The parallelism configuration applied to the pipeline.",
 "PipelineDefinition": "The definition of the pipeline. This can be either a JSON string or an Amazon S3 location.",
@@ -45131,7 +46000,8 @@
 "RepositoryUrl": "The URL of the Git repository."
 },
 "AWS::SageMaker::Space CustomFileSystem": {
- "EFSFileSystem": "A custom file system in Amazon EFS."
+ "EFSFileSystem": "A custom file system in Amazon EFS.",
+ "FSxLustreFileSystem": "A custom file system in Amazon FSx for Lustre."
 },
 "AWS::SageMaker::Space CustomImage": {
 "AppImageConfigName": "The name of the AppImageConfig.",
@@ -45144,13 +46014,16 @@
 "AWS::SageMaker::Space EbsStorageSettings": {
 "EbsVolumeSizeInGb": "The size of an EBS storage volume for a space."
 },
+ "AWS::SageMaker::Space FSxLustreFileSystem": {
+ "FileSystemId": "Amazon FSx for Lustre file system ID."
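The new AWS::SageMaker::PartnerApp resource documented above might be declared roughly as follows. Most of its property descriptions are still empty in this update, so treat this as a sketch: the name, role ARN, admin user, and argument map are hypothetical, `comet` is taken from the documented type list, and `AuthType: IAM` is an assumption not confirmed by the schema text:

    Resources:
      ExamplePartnerApp:
        Type: AWS::SageMaker::PartnerApp
        Properties:
          Name: example-comet-app                 # hypothetical name
          Type: comet                             # documented values: lakera-guard, comet, deepchecks-llm-evaluation, fiddler
          AuthType: IAM                           # assumption; not documented in this update
          ExecutionRoleArn: arn:aws:iam::111122223333:role/ExamplePartnerAppRole  # hypothetical
          ApplicationConfig:
            AdminUsers:
              - example-user                      # hypothetical admin user
            Arguments:
              exampleKey: exampleValue            # hypothetical app-specific input
          MaintenanceConfig:
            MaintenanceWindowStart: "TUE:03:30"   # documented 3-letter-day:24-h-hour:minute format, UTC
          # Tier omitted; its description is empty in this schema update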
+ },
 "AWS::SageMaker::Space JupyterServerAppSettings": {
- "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required.",
+ "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required.",
 "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list."
 },
 "AWS::SageMaker::Space KernelGatewayAppSettings": {
- "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.",
- "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.",
+ "CustomImages": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.",
+ "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.",
 "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list."
 },
 "AWS::SageMaker::Space OwnershipSettings": {
@@ -45159,7 +46032,7 @@
 "AWS::SageMaker::Space ResourceSpec": {
 "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.",
 "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.",
- "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.",
+ "SageMakerImageArn": "The ARN of the SageMaker AI image that the image version belongs to.",
 "SageMakerImageVersionArn": "The ARN of the image version created on the instance."
 },
 "AWS::SageMaker::Space SpaceAppLifecycleManagement": {
@@ -45178,9 +46051,9 @@
 "DefaultResourceSpec": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on."
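Similarly, a space can mount the new FSx for Lustre custom file system described above; a minimal sketch with hypothetical domain ID, space name, and file system ID (the app type is an illustrative assumption, since the allowed values aren't enumerated here; see the AppType note below):

    Resources:
      ExampleSpace:
        Type: AWS::SageMaker::Space
        Properties:
          DomainId: d-0123456789ab                # hypothetical domain ID
          SpaceName: example-lustre-space         # hypothetical name
          SpaceSettings:
            AppType: JupyterLab                   # assumption
            CustomFileSystems:
              - FSxLustreFileSystem:
                  FileSystemId: fs-0123456789abcdef0   # hypothetical FSx for Lustre file system ID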
 },
 "AWS::SageMaker::Space SpaceSettings": {
- "AppType": "The type of app created within the space.",
+ "AppType": "The type of app created within the space.\n\nIf using the [UpdateSpace](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_UpdateSpace.html) API, you can't change the app type of your space by specifying a different value for this field.",
 "CodeEditorAppSettings": "The Code Editor application settings.",
- "CustomFileSystems": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.",
+ "CustomFileSystems": "A file system, created by you, that you assign to a space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.",
 "JupyterLabAppSettings": "The settings for the JupyterLab application.",
 "JupyterServerAppSettings": "The JupyterServer app settings.",
 "KernelGatewayAppSettings": "The KernelGateway app settings.",
@@ -45199,7 +46072,7 @@
 "AWS::SageMaker::StudioLifecycleConfig": {
 "StudioLifecycleConfigAppType": "The App type to which the Lifecycle Configuration is attached.",
 "StudioLifecycleConfigContent": "",
- "StudioLifecycleConfigName": "The name of the Amazon SageMaker Studio Lifecycle Configuration.",
+ "StudioLifecycleConfigName": "The name of the Amazon SageMaker AI Studio Lifecycle Configuration.",
 "Tags": ""
 },
 "AWS::SageMaker::StudioLifecycleConfig Tag": {
@@ -45227,7 +46100,8 @@
 "RepositoryUrl": "The URL of the Git repository."
 },
 "AWS::SageMaker::UserProfile CustomFileSystemConfig": {
- "EFSFileSystemConfig": "The settings for a custom Amazon EFS file system."
+ "EFSFileSystemConfig": "The settings for a custom Amazon EFS file system.",
+ "FSxLustreFileSystemConfig": "The settings for a custom Amazon FSx for Lustre file system."
 },
 "AWS::SageMaker::UserProfile CustomImage": {
 "AppImageConfigName": "The name of the AppImageConfig.",
@@ -45247,6 +46121,10 @@
 },
 "AWS::SageMaker::UserProfile EFSFileSystemConfig": {
 "FileSystemId": "The ID of your Amazon EFS file system.",
+ "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker AI Studio. Permitted users can access only this directory and below."
+ },
+ "AWS::SageMaker::UserProfile FSxLustreFileSystemConfig": {
+ "FileSystemId": "The globally unique, 17-digit ID of the file system, assigned by Amazon FSx for Lustre.",
 "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below."
 },
 "AWS::SageMaker::UserProfile IdleSettings": {
@@ -45267,8 +46145,8 @@
 "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list."
 },
 "AWS::SageMaker::UserProfile KernelGatewayAppSettings": {
- "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.",
- "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here.
The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.",
+ "CustomImages": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.",
+ "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.",
 "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list."
 },
 "AWS::SageMaker::UserProfile RStudioServerProAppSettings": {
@@ -45278,7 +46156,7 @@
 "AWS::SageMaker::UserProfile ResourceSpec": {
 "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.",
 "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.",
- "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.",
+ "SageMakerImageArn": "The ARN of the SageMaker AI image that the image version belongs to.",
 "SageMakerImageVersionArn": "The ARN of the image version created on the instance."
 },
 "AWS::SageMaker::UserProfile SharingSettings": {
@@ -45296,7 +46174,7 @@
 },
 "AWS::SageMaker::UserProfile UserSettings": {
 "CodeEditorAppSettings": "The Code Editor application settings.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.",
- "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.",
+ "CustomFileSystemConfigs": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.",
 "CustomPosixUserConfig": "Details about the POSIX identity that is used for file system operations.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.",
 "DefaultLandingUri": "The default experience that the user is directed to when accessing the domain. The supported values are:\n\n- `studio::` : Indicates that Studio is the default experience. This value can only be passed if `StudioWebPortal` is set to `ENABLED` .\n- `app:JupyterServer:` : Indicates that Studio Classic is the default experience.",
 "ExecutionRole": "The execution role for the user.\n\nSageMaker applies this setting only to private spaces that the user creates in the domain.
SageMaker doesn't apply this setting to shared spaces.", @@ -45304,8 +46182,8 @@ "JupyterServerAppSettings": "The Jupyter server's app settings.", "KernelGatewayAppSettings": "The kernel gateway app settings.", "RStudioServerProAppSettings": "A collection of settings that configure user interaction with the `RStudioServerPro` app.", - "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", - "SharingSettings": "Specifies options for sharing Amazon SageMaker Studio notebooks.", + "SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "SharingSettings": "Specifies options for sharing Amazon SageMaker AI Studio notebooks.", "SpaceStorageSettings": "The storage settings for a space.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "StudioWebPortal": "Whether the user can access Studio. If this value is set to `DISABLED` , the user cannot access Studio, even if that is the default experience for the domain.", "StudioWebPortalSettings": "Studio settings. If these settings are applied on a user level, they take priority over the settings applied on a domain level." @@ -45531,15 +46409,15 @@ "ComplianceSecurityControlId": "The security control ID for which a finding was generated. Security control IDs are the same across standards.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ComplianceStatus": "The result of a security check. This field is only used for findings generated from controls.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "Confidence": "The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. `Confidence` is scored on a 0\u2013100 basis using a ratio scale. A value of `0` means 0 percent confidence, and a value of `100` means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. 
For more information, see [Confidence](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-top-level-attributes.html#asff-confidence) in the *AWS Security Hub User Guide* .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "CreatedAt": "A timestamp that indicates when this finding record was created.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "CreatedAt": "A timestamp that indicates when this finding record was created.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "Criticality": "The level of importance that is assigned to the resources that are associated with a finding. `Criticality` is scored on a 0\u2013100 basis, using a ratio scale that supports only full integers. A score of `0` means that the underlying resources have no criticality, and a score of `100` is reserved for the most critical resources. For more information, see [Criticality](https://docs.aws.amazon.com/securityhub/latest/userguide/asff-top-level-attributes.html#asff-criticality) in the *AWS Security Hub User Guide* .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "Description": "A finding's description.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "FirstObservedAt": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", + "FirstObservedAt": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "GeneratorId": "The identifier for the solution-specific component that generated a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", "Id": "The product-specific identifier for a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "LastObservedAt": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "LastObservedAt": "A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "NoteText": "The text of a user-defined note that's added to a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "NoteUpdatedAt": "The timestamp of when the note was updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. 
Maximum number of 20 items.", + "NoteUpdatedAt": "The timestamp of when the note was updated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "NoteUpdatedBy": "The principal that created a note.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ProductArn": "The Amazon Resource Name (ARN) for a third-party product that generated a finding in Security Hub.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "ProductName": "Provides the name of the product that generated the finding. For control-based findings, the product name is Security Hub.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", @@ -45556,15 +46434,15 @@ "SourceUrl": "Provides a URL that links to a page about the current finding in the finding product.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "Title": "A finding's title.\n\nArray Members: Minimum number of 1 item. Maximum number of 100 items.", "Type": "One or more finding types in the format of namespace/category/classifier that classify a finding. For a list of namespaces, classifiers, and categories, see [Types taxonomy for ASFF](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-type-taxonomy.html) in the *AWS Security Hub User Guide* .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", - "UpdatedAt": "A timestamp that indicates when the finding record was most recently updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "UpdatedAt": "A timestamp that indicates when the finding record was most recently updated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "UserDefinedFields": "A list of user-defined name and value string pairs added to a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "VerificationState": "Provides the veracity of a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "WorkflowStatus": "Provides information about the status of the investigation into a finding.\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items." 
}, "AWS::SecurityHub::AutomationRule DateFilter": { "DateRange": "A date range for the date filter.", - "End": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", - "Start": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` ." + "End": "A timestamp that provides the end date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", + "Start": "A timestamp that provides the start date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) ." }, "AWS::SecurityHub::AutomationRule DateRange": { "Unit": "A date range unit for the date filter.", @@ -45645,7 +46523,7 @@ "Regions": "If `RegionLinkingMode` is `ALL_REGIONS_EXCEPT_SPECIFIED` , then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.\n\nIf `RegionLinkingMode` is `SPECIFIED_REGIONS` , then this is a space-separated list of Regions that do aggregate findings to the aggregation Region." }, "AWS::SecurityHub::Hub": { - "AutoEnableControls": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. 
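The DateFilter shape above accepts either fixed endpoints or a relative DateRange; a sketch of both inside an automation rule's criteria, using timestamp examples quoted earlier in this file (the DAYS unit is an assumption, since the unit values aren't enumerated here):

    Criteria:
      # Fixed window: findings created between two absolute timestamps.
      CreatedAt:
        - Start: "2019-01-31T23:00:00Z"
          End: "2024-01-04T15:25:10+17:59"
      # Relative window: findings updated within the last 5 days.
      UpdatedAt:
        - DateRange:
            Unit: DAYS        # assumption
            Value: 5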
To not automatically enable new controls, set this to `false` .", + "AutoEnableControls": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .\n\nWhen you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of `DISABLED` . It can take up to several days for Security Hub to process the control release and designate the control as `ENABLED` in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have `AutoEnableControls` set to `true` .", "ControlFindingGenerator": "Specifies whether an account has consolidated control findings turned on or off. If the value for this field is set to `SECURITY_CONTROL` , Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards.\n\nIf the value for this field is set to `STANDARD_CONTROL` , Security Hub generates separate findings for a control check when the check applies to multiple enabled standards.\n\nThe value for this field in a member account matches the value in the administrator account. For accounts that aren't part of an organization, the default value of this field is `SECURITY_CONTROL` if you enabled Security Hub on or after February 23, 2023.", "EnableDefaultStandards": "Whether to enable the security standards that Security Hub has designated as automatically enabled. If you don't provide a value for `EnableDefaultStandards` , it is set to `true` , and the designated standards are automatically enabled in each AWS Region where you enable Security Hub . If you don't want to enable the designated standards, set `EnableDefaultStandards` to `false` .\n\nCurrently, the automatically enabled standards are the Center for Internet Security (CIS) AWS Foundations Benchmark v1.2.0 and AWS Foundational Security Best Practices (FSBP).", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." @@ -45665,7 +46543,7 @@ "ComplianceSecurityControlParametersValue": "The current value of a security control parameter.", "ComplianceStatus": "Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details.", "Confidence": "A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.", - "CreatedAt": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
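Read together, the Hub properties above combine as in this minimal sketch, which enables the hub with consolidated control findings, automatic enablement of new controls, and no default standards:

    Resources:
      ExampleHub:
        Type: AWS::SecurityHub::Hub
        Properties:
          AutoEnableControls: true                   # new controls may sit DISABLED for several days while processed
          ControlFindingGenerator: SECURITY_CONTROL  # one finding per control check across standards
          EnableDefaultStandards: false              # skip auto-enabling CIS v1.2.0 and FSBP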
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "CreatedAt": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "Criticality": "The level of importance assigned to the resources associated with the finding.\n\nA score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.", "Description": "A finding's description.", "FindingProviderFieldsConfidence": "The finding provider value for the finding confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.", @@ -45675,11 +46553,11 @@ "FindingProviderFieldsSeverityLabel": "The finding provider value for the severity label.", "FindingProviderFieldsSeverityOriginal": "The finding provider's original value for the severity.", "FindingProviderFieldsTypes": "One or more finding types that the finding provider assigned to the finding. Uses the format of `namespace/category/classifier` that classify a finding.\n\nValid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications", - "FirstObservedAt": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "FirstObservedAt": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "GeneratorId": "The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.", "Id": "The security findings provider-specific identifier for a finding.", "Keyword": "This field is deprecated. A keyword for a finding.", - "LastObservedAt": "A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "LastObservedAt": "A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "MalwareName": "The name of the malware that was observed.", "MalwarePath": "The filesystem path of the malware that was observed.", "MalwareState": "The state of the malware that was observed.", @@ -45698,12 +46576,12 @@ "NoteText": "The text of a note.", "NoteUpdatedAt": "The timestamp of when the note was updated.", "NoteUpdatedBy": "The principal that created a note.", - "ProcessLaunchedAt": "A timestamp that identifies when the process was launched.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .",
+ "ProcessLaunchedAt": "A timestamp that identifies when the process was launched.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .",
"ProcessName": "The name of the process.",
"ProcessParentPid": "The parent process ID. This field accepts positive integers between `0` and `2147483647` .",
"ProcessPath": "The path to the process executable.",
"ProcessPid": "The process ID.",
- "ProcessTerminatedAt": "A timestamp that identifies when the process was terminated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .",
+ "ProcessTerminatedAt": "A timestamp that identifies when the process was terminated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .",
"ProductArn": "The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub.",
"ProductFields": "A data type where security findings providers can include additional solution-specific details that aren't part of the defined `AwsSecurityFinding` format.",
"ProductName": "The name of the solution (product) that generates findings.",
@@ -45732,7 +46610,7 @@
"ResourceAwsS3BucketOwnerName": "The display name of the owner of the S3 bucket.",
"ResourceContainerImageId": "The identifier of the image related to a finding.",
"ResourceContainerImageName": "The name of the image related to a finding.",
- "ResourceContainerLaunchedAt": "A timestamp that identifies when the container was started.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. 
The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "ResourceContainerLaunchedAt": "A timestamp that identifies when the container was started.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "ResourceContainerName": "The name of the container related to a finding.", "ResourceDetailsOther": "The details of a resource that doesn't have a specific subfield for the resource type defined.", "ResourceId": "The canonical identifier for the given resource type.", @@ -45746,14 +46624,14 @@ "SeverityProduct": "Deprecated. This attribute isn't included in findings. Instead of providing `Product` , provide `Original` .\n\nThe native severity as defined by the AWS service or integrated partner product that generated the finding.", "SourceUrl": "A URL that links to a page about the current finding in the security findings provider's solution.", "ThreatIntelIndicatorCategory": "The category of a threat intelligence indicator.", - "ThreatIntelIndicatorLastObservedAt": "A timestamp that identifies the last observation of a threat intelligence indicator.", + "ThreatIntelIndicatorLastObservedAt": "A timestamp that identifies the last observation of a threat intelligence indicator.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "ThreatIntelIndicatorSource": "The source of the threat intelligence.", "ThreatIntelIndicatorSourceUrl": "The URL for more details from the source of the threat intelligence.", "ThreatIntelIndicatorType": "The type of a threat intelligence indicator.", "ThreatIntelIndicatorValue": "The value of a threat intelligence indicator.", "Title": "A finding's title.", "Type": "A finding type in the format of `namespace/category/classifier` that classifies a finding.", - "UpdatedAt": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "UpdatedAt": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "UserDefinedFields": "A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.", "VerificationState": "The veracity of a finding.", "VulnerabilitiesExploitAvailable": "Indicates whether a software vulnerability in your environment has a known exploit. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", @@ -45766,8 +46644,8 @@ }, "AWS::SecurityHub::Insight DateFilter": { "DateRange": "A date range for the date filter.", - "End": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", - "Start": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` ." 
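As a minimal illustrative sketch (the logical ID, insight name, and filter values below are placeholders, not part of this schema change), the `Start` and `End` fields of a `DateFilter` might be used in a template like this, with both timestamps in the formats listed above:

  {
    "RecentFindingsInsight": {
      "Type": "AWS::SecurityHub::Insight",
      "Properties": {
        "Name": "Findings created in January 2024",
        "GroupByAttribute": "ResourceId",
        "Filters": {
          "CreatedAt": [
            { "Start": "2024-01-01T00:00:00Z", "End": "2024-01-31T23:59:59.999Z" }
          ]
        }
      }
    }
  }

Per the rounding note above, anything finer than milliseconds would be rounded (for example, `2024-10-31T23:00:00.123456789Z` becomes `2024-10-31T23:00:00.123Z` ).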
+ "End": "A timestamp that provides the end date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", + "Start": "A timestamp that provides the start date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) ." }, "AWS::SecurityHub::Insight DateRange": { "Unit": "A date range unit for the date filter.", @@ -46161,6 +47039,7 @@ "HealthCheckCustomConfig": "A complex type that contains information about an optional custom health check.\n\n> If you specify a health check configuration, you can specify either `HealthCheckCustomConfig` or `HealthCheckConfig` but not both.", "Name": "The name of the service.", "NamespaceId": "The ID of the namespace that was used to create the service.\n\n> You must specify a value for `NamespaceId` either for the service properties or for [DnsConfig](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-servicediscovery-service-dnsconfig.html) . Don't specify a value in both places.", + "ServiceAttributes": "A string map that contains the following information for the service:\n\n- The attributes that apply to the service\n- For each attribute, the applicable value.\n\nYou can specify a total of 30 attributes.", "Tags": "The tags for the service. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", "Type": "If present, specifies that the service instances are only discoverable using the `DiscoverInstances` API operation. No DNS records is registered for the service instances. The only valid value is `HTTP` ." }, @@ -46360,7 +47239,7 @@ "ArtifactS3Location": "The location in Amazon S3 where Synthetics stores artifacts from the runs of this canary. Artifacts include the log file, screenshots, and HAR files. Specify the full location path, including `s3://` at the beginning of the path.", "Code": "Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included. If the script is passed into the canary directly, the script code is contained in the value of `Script` .", "ExecutionRoleArn": "The ARN of the IAM role to be used to run the canary. This role must already exist, and must include `lambda.amazonaws.com` as a principal in the trust policy. The role must also have the following permissions:\n\n- `s3:PutObject`\n- `s3:GetBucketLocation`\n- `s3:ListAllMyBuckets`\n- `cloudwatch:PutMetricData`\n- `logs:CreateLogGroup`\n- `logs:CreateLogStream`\n- `logs:PutLogEvents`", - "FailureRetentionPeriod": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.", + "FailureRetentionPeriod": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. 
The valid range is 1 to 455 days.\n\nThis setting affects the range of information returned by [GetCanaryRuns](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanaryRuns.html) , as well as the range of information displayed in the Synthetics console.", "Name": "The name for this canary. Be sure to give it a descriptive name that distinguishes it from other canaries in your account.\n\nDo not include secrets or proprietary information in your canary names. The canary name makes up part of the canary ARN, and the ARN is included in outbound calls over the internet. For more information, see [Security Considerations for Synthetics Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html) .", "ProvisionedResourceCleanup": "Specifies whether to also delete the Lambda functions and layers used by this canary when the canary is deleted. If it is `AUTOMATIC` , the Lambda functions and layers will be deleted when the canary is deleted.\n\nIf the value of this parameter is `OFF` , then the value of the `DeleteLambda` parameter of the [DeleteCanary](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DeleteCanary.html) operation determines whether the Lambda functions and layers will be deleted.", "ResourcesToReplicateTags": "To have the tags that you apply to this canary also be applied to the Lambda function that the canary uses, specify this property with the value `lambda-function` . If you do this, CloudWatch Synthetics will keep the tags of the canary and the Lambda function synchronized. Any future changes you make to the canary's tags will also be applied to the function.", @@ -46368,7 +47247,7 @@ "RuntimeVersion": "Specifies the runtime version to use for the canary. For more information about runtime versions, see [Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html) .", "Schedule": "A structure that contains information about how often the canary is to run, and when these runs are to stop.", "StartCanaryAfterCreation": "Specify TRUE to have the canary start making runs immediately after it is created.\n\nA canary that you create using CloudFormation can't be used to monitor the CloudFormation stack that creates the canary or to roll back that stack if there is a failure.", - "SuccessRetentionPeriod": "The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.", + "SuccessRetentionPeriod": "The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.\n\nThis setting affects the range of information returned by [GetCanaryRuns](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanaryRuns.html) , as well as the range of information displayed in the Synthetics console.", "Tags": "The list of key-value pairs that are associated with the canary.", "VPCConfig": "If this canary is to test an endpoint in a VPC, this structure contains information about the subnet and security groups of the VPC endpoint. 
For more information, see [Running a Canary in a VPC](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_VPC.html) .", "VisualReference": "If this canary performs visual monitoring by comparing screenshots, this structure contains the ID of the canary run to use as the baseline for screenshots, and the coordinates of any parts of the screen to ignore during the visual monitoring comparison." @@ -46883,8 +47762,8 @@ }, "AWS::VpcLattice::AccessLogSubscription": { "DestinationArn": "The Amazon Resource Name (ARN) of the destination. The supported destination types are CloudWatch Log groups, Kinesis Data Firehose delivery streams, and Amazon S3 buckets.", - "ResourceIdentifier": "The ID or Amazon Resource Name (ARN) of the service network or service.", - "ServiceNetworkLogType": "", + "ResourceIdentifier": "The ID or ARN of the service network or service.", + "ServiceNetworkLogType": "Log type of the service network.", "Tags": "The tags for the access log subscription." }, "AWS::VpcLattice::AccessLogSubscription Tag": { @@ -46893,14 +47772,14 @@ }, "AWS::VpcLattice::AuthPolicy": { "Policy": "The auth policy.", - "ResourceIdentifier": "The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created." + "ResourceIdentifier": "The ID or ARN of the service network or service for which the policy is created." }, "AWS::VpcLattice::Listener": { "DefaultAction": "The action for the default rule. Each listener has a default rule. The default rule is used if no other rules match.", "Name": "The name of the listener. A listener name must be unique within a service. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen.\n\nIf you don't specify a name, CloudFormation generates one. However, if you specify a name, and later want to replace the resource, you must specify a new name.", "Port": "The listener port. You can specify a value from 1 to 65535. For HTTP, the default is 80. For HTTPS, the default is 443.", "Protocol": "The listener protocol.", - "ServiceIdentifier": "The ID or Amazon Resource Name (ARN) of the service.", + "ServiceIdentifier": "The ID or ARN of the service.", "Tags": "The tags for the listener." }, "AWS::VpcLattice::Listener DefaultAction": { @@ -46921,17 +47800,54 @@ "TargetGroupIdentifier": "The ID of the target group.", "Weight": "Only required if you specify multiple target groups for a forward action. The weight determines how requests are distributed to the target group. For example, if you specify two target groups, each with a weight of 10, each target group receives half the requests. If you specify two target groups, one with a weight of 10 and the other with a weight of 20, the target group with a weight of 20 receives twice as many requests as the other target group. If there's only one target group specified, then the default value is 100." }, + "AWS::VpcLattice::ResourceConfiguration": { + "AllowAssociationToSharableServiceNetwork": "Specifies whether the resource configuration can be associated with a sharable service network.", + "Name": "The name of the resource configuration.", + "PortRanges": "(SINGLE, GROUP, CHILD) The TCP port ranges that a consumer can use to access a resource configuration (for example: 1-65535). 
You can separate port ranges using commas (for example: 1,2,22-30).",
+ "ProtocolType": "(SINGLE, GROUP) The protocol accepted by the resource configuration.",
+ "ResourceConfigurationAuthType": "The auth type for the resource configuration.",
+ "ResourceConfigurationDefinition": "Identifies the resource configuration in one of the following ways:\n\n- *Amazon Resource Name (ARN)* - Supported resource-types that are provisioned by AWS services, such as RDS databases, can be identified by their ARN.\n- *Domain name* - Any domain name that is publicly resolvable.\n- *IP address* - For IPv4 and IPv6, only IP addresses in the VPC are supported.",
+ "ResourceConfigurationGroupId": "The ID of the group resource configuration.",
+ "ResourceConfigurationType": "The type of resource configuration. A resource configuration can be one of the following types:\n\n- *SINGLE* - A single resource.\n- *GROUP* - A group of resources. You must create a group resource configuration before you create a child resource configuration.\n- *CHILD* - A single resource that is part of a group resource configuration.\n- *ARN* - An AWS resource.",
+ "ResourceGatewayId": "The ID of the resource gateway.",
+ "Tags": "The tags for the resource configuration."
+ },
+ "AWS::VpcLattice::ResourceConfiguration DnsResource": {
+ "DomainName": "The domain name of the resource configuration.",
+ "IpAddressType": "The IP address type for the resource configuration."
+ },
+ "AWS::VpcLattice::ResourceConfiguration ResourceConfigurationDefinition": {
+ "ArnResource": "The Amazon Resource Name (ARN) of the resource configuration. For the ARN syntax and format, see [ARN format](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html#arns-syntax) in the *AWS Identity and Access Management user guide* .",
+ "DnsResource": "The DNS name of the resource configuration.",
+ "IpResource": "The IP address of the resource configuration."
+ },
+ "AWS::VpcLattice::ResourceConfiguration Tag": {
+ "Key": "The tag key.",
+ "Value": "The tag value."
+ },
+ "AWS::VpcLattice::ResourceGateway": {
+ "IpAddressType": "The type of IP address used by the resource gateway.",
+ "Name": "The name of the resource gateway.",
+ "SecurityGroupIds": "The IDs of the security groups applied to the resource gateway.",
+ "SubnetIds": "The IDs of the VPC subnets for the resource gateway.",
+ "Tags": "The tags for the resource gateway.",
+ "VpcIdentifier": "The ID of the VPC for the resource gateway."
+ },
+ "AWS::VpcLattice::ResourceGateway Tag": {
+ "Key": "The tag key.",
+ "Value": "The tag value."
+ },
"AWS::VpcLattice::ResourcePolicy": {
"Policy": "An IAM policy.",
"ResourceArn": "The Amazon Resource Name (ARN) of the service network or service."
},
"AWS::VpcLattice::Rule": {
"Action": "Describes the action for a rule.",
- "ListenerIdentifier": "The ID or Amazon Resource Name (ARN) of the listener.",
+ "ListenerIdentifier": "The ID or ARN of the listener.",
"Match": "The rule match.",
"Name": "The name of the rule. The name must be unique within the listener. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen.\n\nIf you don't specify a name, CloudFormation generates one. However, if you specify a name, and later want to replace the resource, you must specify a new name.",
"Priority": "The priority assigned to the rule. Each rule for a specific listener must have a unique priority. 
The lower the priority number the higher the priority.", - "ServiceIdentifier": "The ID or Amazon Resource Name (ARN) of the service.", + "ServiceIdentifier": "The ID or ARN of the service.", "Tags": "The tags for the rule." }, "AWS::VpcLattice::Rule Action": { @@ -46997,20 +47913,29 @@ "AWS::VpcLattice::ServiceNetwork": { "AuthType": "The type of IAM policy.\n\n- `NONE` : The resource does not use an IAM policy. This is the default.\n- `AWS_IAM` : The resource uses an IAM policy. When this type is used, auth is enabled and an auth policy is required.", "Name": "The name of the service network. The name must be unique to the account. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen.\n\nIf you don't specify a name, CloudFormation generates one. However, if you specify a name, and later want to replace the resource, you must specify a new name.", - "SharingConfig": "", + "SharingConfig": "Specify if the service network should be enabled for sharing.", "Tags": "The tags for the service network." }, "AWS::VpcLattice::ServiceNetwork SharingConfig": { - "enabled": "" + "enabled": "Specify if the service network should be enabled for sharing." }, "AWS::VpcLattice::ServiceNetwork Tag": { "Key": "The tag key.", "Value": "The tag value." }, + "AWS::VpcLattice::ServiceNetworkResourceAssociation": { + "ResourceConfigurationId": "The ID of the resource configuration associated with the service network.", + "ServiceNetworkId": "The ID of the service network associated with the resource configuration.", + "Tags": "A key-value pair to associate with a resource." + }, + "AWS::VpcLattice::ServiceNetworkResourceAssociation Tag": { + "Key": "The tag key.", + "Value": "A tag value." + }, "AWS::VpcLattice::ServiceNetworkServiceAssociation": { "DnsEntry": "The DNS information of the service.", - "ServiceIdentifier": "The ID or Amazon Resource Name (ARN) of the service.", - "ServiceNetworkIdentifier": "The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts.", + "ServiceIdentifier": "The ID or ARN of the service.", + "ServiceNetworkIdentifier": "The ID or ARN of the service network. You must use an ARN if the resources are in different accounts.", "Tags": "The tags for the association." }, "AWS::VpcLattice::ServiceNetworkServiceAssociation DnsEntry": { @@ -47023,7 +47948,7 @@ }, "AWS::VpcLattice::ServiceNetworkVpcAssociation": { "SecurityGroupIds": "The IDs of the security groups. Security groups aren't added by default. You can add a security group to apply network level controls to control which resources in a VPC are allowed to access the service network and its services. For more information, see [Control traffic to resources using security groups](https://docs.aws.amazon.com//vpc/latest/userguide/VPC_SecurityGroups.html) in the *Amazon VPC User Guide* .", - "ServiceNetworkIdentifier": "The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN when the resources specified in the operation are in different accounts.", + "ServiceNetworkIdentifier": "The ID or ARN of the service network. You must use an ARN if the resources are in different accounts.", "Tags": "The tags for the association.", "VpcIdentifier": "The ID of the VPC." 
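As a minimal illustrative sketch (the identifiers are placeholders), a VPC association as described by the properties above might look like this; per the `ServiceNetworkIdentifier` note, the ARN form is required when the VPC and service network are in different accounts:

  {
    "ExampleVpcAssociation": {
      "Type": "AWS::VpcLattice::ServiceNetworkVpcAssociation",
      "Properties": {
        "ServiceNetworkIdentifier": "sn-0123456789abcdef0",
        "VpcIdentifier": "vpc-0123456789abcdef0",
        "SecurityGroupIds": [ "sg-0123456789abcdef0" ]
      }
    }
  }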
},
@@ -47474,7 +48399,7 @@
"CustomKeys": "Specifies the aggregate keys to use in a rate-based rule.",
"EvaluationWindowSec": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)",
"ForwardedIPConfig": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.",
- "Limit": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.",
+ "Limit": "The limit on requests during the specified evaluation window for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.",
"ScopeDownStatement": "An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement. When you use a scope-down statement, the rate-based rule only tracks and rate limits requests that match the scope-down statement. You can use any nestable `Statement` in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement."
},
"AWS::WAFv2::RuleGroup RateBasedStatementCustomKey": {
@@ -47525,7 +48450,7 @@
"ChallengeConfig": "Specifies how AWS WAF should handle `Challenge` evaluations. If you don't specify this, AWS WAF uses the challenge configuration that's defined for the web ACL.",
"Name": "The name of the rule.\n\nIf you change the name of a `Rule` after you create it and you want the rule's metric name to reflect the change, update the metric name in the rule's `VisibilityConfig` settings. AWS WAF doesn't automatically update the metric name when you update the rule name.",
"Priority": "If you define more than one `Rule` in a `WebACL` , AWS WAF evaluates each request against the `Rules` in order based on the value of `Priority` . AWS WAF processes rules with lower priority first. The priorities don't need to be consecutive, but they must all be different.",
- "RuleLabels": "Labels to apply to web requests that match the rule match statement. 
AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", + "RuleLabels": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\n> Any rule that isn't a rule group reference statement or managed rule group statement can add labels to matching web requests. \n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", "Statement": "The AWS WAF processing statement for the rule, for example `ByteMatchStatement` or `SizeConstraintStatement` .", "VisibilityConfig": "Defines and enables Amazon CloudWatch metrics and web request sample collection.\n\nIf you change the name of a `Rule` after you create it and you want the rule's metric name to reflect the change, update the metric name as well. AWS WAF doesn't automatically update the metric name." }, @@ -47765,7 +48690,7 @@ "ExcludedRules": "Rules in the referenced rule group whose actions are set to `Count` .\n\n> Instead of this option, use `RuleActionOverrides` . It accepts any valid action setting, including `Count` .", "ManagedRuleGroupConfigs": "Additional information that's used by a managed rule group. Many managed rule groups don't require this.\n\nThe rule groups used for intelligent threat mitigation require additional configuration:\n\n- Use the `AWSManagedRulesACFPRuleSet` configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.\n- Use the `AWSManagedRulesATPRuleSet` configuration object to configure the account takeover prevention managed rule group. 
The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.\n- Use the `AWSManagedRulesBotControlRuleSet` configuration object to configure the protection level that you want the Bot Control rule group to use.",
"Name": "The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.",
- "RuleActionOverrides": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.",
+ "RuleActionOverrides": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\n> Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, AWS WAF doesn't return an error and doesn't apply the override setting. \n\nYou can use overrides for testing, for example you can override all of the rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.",
"ScopeDownStatement": "An optional nested statement that narrows the scope of the web requests that are evaluated by the managed rule group. Requests are only evaluated by the rule group if they match the scope-down statement. You can use any nestable `Statement` in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement.",
"VendorName": "The name of the managed rule group vendor. You use this, along with the rule group name, to identify a rule group.",
"Version": "The version of the managed rule group to use. If you specify this, the version setting is fixed until you change it. If you don't specify this, AWS WAF uses the vendor's default version, and then keeps the version at the vendor's default when the vendor updates the managed rule group settings."
@@ -47785,7 +48710,7 @@
"CustomKeys": "Specifies the aggregate keys to use in a rate-based rule.",
"EvaluationWindowSec": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)",
"ForwardedIPConfig": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. 
\n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", - "Limit": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", + "Limit": "The limit on requests during the specified evaluation window for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "ScopeDownStatement": "An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement. When you use a scope-down statement, the rate-based rule only tracks and rate limits requests that match the scope-down statement. You can use any nestable `Statement` in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement." }, "AWS::WAFv2::WebACL RateBasedStatementCustomKey": { @@ -47877,7 +48802,7 @@ "Name": "The name of the rule.\n\nIf you change the name of a `Rule` after you create it and you want the rule's metric name to reflect the change, update the metric name in the rule's `VisibilityConfig` settings. AWS WAF doesn't automatically update the metric name when you update the rule name.", "OverrideAction": "The override action to apply to the rules in a rule group, instead of the individual rule action settings. This is used only for rules whose statements reference a rule group. Rule statements that reference a rule group are `RuleGroupReferenceStatement` and `ManagedRuleGroupStatement` .\n\nSet the override action to none to leave the rule group rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.\n\nYou must set either this `OverrideAction` setting or the `Action` setting, but not both:\n\n- If the rule statement references a rule group, you must set this override action setting and you must not set the rule's action setting.\n- If the rule statement doesn't reference a rule group, you must set the rule action setting and you must not set the rule's override action setting.", "Priority": "If you define more than one `Rule` in a `WebACL` , AWS WAF evaluates each request against the `Rules` in order based on the value of `Priority` . AWS WAF processes rules with lower priority first. The priorities don't need to be consecutive, but they must all be different.", - "RuleLabels": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. 
The rule's rule group or web ACL defines the label namespace.\n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", + "RuleLabels": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\n> Any rule that isn't a rule group reference statement or managed rule group statement can add labels to matching web requests. \n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", "Statement": "The AWS WAF processing statement for the rule, for example `ByteMatchStatement` or `SizeConstraintStatement` .", "VisibilityConfig": "Defines and enables Amazon CloudWatch metrics and web request sample collection.\n\nIf you change the name of a `Rule` after you create it and you want the rule's metric name to reflect the change, update the metric name as well. AWS WAF doesn't automatically update the metric name." }, @@ -47895,7 +48820,7 @@ "AWS::WAFv2::WebACL RuleGroupReferenceStatement": { "Arn": "The Amazon Resource Name (ARN) of the entity.", "ExcludedRules": "Rules in the referenced rule group whose actions are set to `Count` .\n\n> Instead of this option, use `RuleActionOverrides` . It accepts any valid action setting, including `Count` .", - "RuleActionOverrides": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic." + "RuleActionOverrides": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\n> Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, AWS WAF doesn't return an error and doesn't apply the override setting. 
\n\nYou can use overrides for testing, for example you can override all of the rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic."
},
"AWS::WAFv2::WebACL SingleHeader": {
"Name": "The name of the query header to inspect."
},
@@ -47962,9 +48887,11 @@
},
"AWS::Wisdom::AIAgent AIAgentConfiguration": {
"AnswerRecommendationAIAgentConfiguration": "The configuration for AI Agents of type `ANSWER_RECOMMENDATION` .",
- "ManualSearchAIAgentConfiguration": "The configuration for AI Agents of type `MANUAL_SEARCH` ."
+ "ManualSearchAIAgentConfiguration": "The configuration for AI Agents of type `MANUAL_SEARCH` .",
+ "SelfServiceAIAgentConfiguration": "The self-service AI agent configuration."
},
"AWS::Wisdom::AIAgent AnswerRecommendationAIAgentConfiguration": {
+ "AnswerGenerationAIGuardrailId": "The ID of the answer generation AI guardrail.",
"AnswerGenerationAIPromptId": "The AI Prompt identifier for the Answer Generation prompt used by the `ANSWER_RECOMMENDATION` AI Agent.",
"AssociationConfigurations": "The association configurations for overriding behavior on this AI Agent.",
"IntentLabelingGenerationAIPromptId": "The AI Prompt identifier for the Intent Labeling prompt used by the `ANSWER_RECOMMENDATION` AI Agent.",
@@ -47984,6 +48911,7 @@
"OverrideKnowledgeBaseSearchType": ""
},
"AWS::Wisdom::AIAgent ManualSearchAIAgentConfiguration": {
+ "AnswerGenerationAIGuardrailId": "The ID of the answer generation AI guardrail.",
"AnswerGenerationAIPromptId": "The AI Prompt identifier for the Answer Generation prompt used by the `MANUAL_SEARCH` AI Agent.",
"AssociationConfigurations": "The association configurations for overriding behavior on this AI Agent."
},
"AndConditions": "",
"TagCondition": "A leaf node condition which can be used to specify a tag condition."
},
+ "AWS::Wisdom::AIAgent SelfServiceAIAgentConfiguration": {
+ "AssociationConfigurations": "The association configuration of the self-service AI agent.",
+ "SelfServiceAIGuardrailId": "The ID of the self-service AI guardrail.",
+ "SelfServiceAnswerGenerationAIPromptId": "The ID of the self-service answer generation AI prompt.",
+ "SelfServicePreProcessingAIPromptId": "The ID of the self-service preprocessing AI prompt."
+ },
"AWS::Wisdom::AIAgent TagCondition": {
"Key": "The tag key in the tag condition.",
"Value": "The tag value in the tag condition."
},
@@ -48005,6 +48939,72 @@
"AssistantId": "",
"ModifiedTimeSeconds": "The time the AI Agent version was last modified in seconds."
},
+ "AWS::Wisdom::AIGuardrail": {
+ "AssistantId": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. 
URLs cannot contain the ARN.",
+ "BlockedInputMessaging": "The message to return when the AI Guardrail blocks a prompt.",
+ "BlockedOutputsMessaging": "The message to return when the AI Guardrail blocks a model response.",
+ "ContentPolicyConfig": "Contains details about how to handle harmful content.",
+ "ContextualGroundingPolicyConfig": "The policy configuration details for the AI Guardrail's contextual grounding policy.",
+ "Description": "A description of the AI Guardrail.",
+ "Name": "The name of the AI Guardrail.",
+ "SensitiveInformationPolicyConfig": "Contains details about PII entities and regular expressions to configure for the AI Guardrail.",
+ "Tags": "The tags used to organize, track, or control access for this resource.",
+ "TopicPolicyConfig": "Contains details about topics that the AI Guardrail should identify and deny.",
+ "WordPolicyConfig": "Contains details about the word policy configured for the AI Guardrail."
+ },
+ "AWS::Wisdom::AIGuardrail AIGuardrailContentPolicyConfig": {
+ "FiltersConfig": "List of content filter configurations in a content policy."
+ },
+ "AWS::Wisdom::AIGuardrail AIGuardrailContextualGroundingPolicyConfig": {
+ "FiltersConfig": "List of contextual grounding filter configs."
+ },
+ "AWS::Wisdom::AIGuardrail AIGuardrailSensitiveInformationPolicyConfig": {
+ "PiiEntitiesConfig": "List of entities.",
+ "RegexesConfig": "List of regex."
+ },
+ "AWS::Wisdom::AIGuardrail AIGuardrailTopicPolicyConfig": {
+ "TopicsConfig": "List of topic configs in topic policy."
+ },
+ "AWS::Wisdom::AIGuardrail AIGuardrailWordPolicyConfig": {
+ "ManagedWordListsConfig": "A config for the list of managed words.",
+ "WordsConfig": "List of custom word configurations."
+ },
+ "AWS::Wisdom::AIGuardrail GuardrailContentFilterConfig": {
+ "InputStrength": "The strength of the input for the guardrail content filter.",
+ "OutputStrength": "The output strength of the guardrail content filter.",
+ "Type": "The type of the guardrail content filter."
+ },
+ "AWS::Wisdom::AIGuardrail GuardrailContextualGroundingFilterConfig": {
+ "Threshold": "The threshold for this filter.",
+ "Type": "The type of this filter."
+ },
+ "AWS::Wisdom::AIGuardrail GuardrailManagedWordsConfig": {
+ "Type": "The type of guardrail managed words."
+ },
+ "AWS::Wisdom::AIGuardrail GuardrailPiiEntityConfig": {
+ "Action": "The action of guardrail PII entity configuration.",
+ "Type": ""
+ },
+ "AWS::Wisdom::AIGuardrail GuardrailRegexConfig": {
+ "Action": "The action of the guardrail regex configuration.",
+ "Description": "The regex description.",
+ "Name": "A regex configuration.",
+ "Pattern": "The regex pattern."
+ },
+ "AWS::Wisdom::AIGuardrail GuardrailTopicConfig": {
+ "Definition": "Definition of topic in topic policy.",
+ "Examples": "Text example in topic policy.",
+ "Name": "Name of topic in topic policy.",
+ "Type": "Type of topic in a policy."
+ },
+ "AWS::Wisdom::AIGuardrail GuardrailWordConfig": {
+ "Text": "The custom word text."
+ },
+ "AWS::Wisdom::AIGuardrailVersion": {
+ "AIGuardrailId": "The ID of the AI guardrail version.",
+ "AssistantId": "The ID of the AI guardrail version assistant.",
+ "ModifiedTimeSeconds": "The modified time of the AI guardrail version in seconds."
+ },
"AWS::Wisdom::AIPrompt": {
"ApiFormat": "The API format used for this AI Prompt.",
"AssistantId": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. 
URLs cannot contain the ARN.",
@@ -48069,8 +49069,8 @@
"ObjectFields": "The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. These fields are indexed and used to source recommendations."
},
"AWS::Wisdom::KnowledgeBase BedrockFoundationModelConfiguration": {
- "ModelArn": "",
- "ParsingPrompt": ""
+ "ModelArn": "The model ARN of the Bedrock foundation model.",
+ "ParsingPrompt": "The parsing prompt of the Bedrock foundation model configuration."
},
"AWS::Wisdom::KnowledgeBase ChunkingConfiguration": {
"ChunkingStrategy": "Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for `NONE` , then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.",
@@ -48079,7 +49079,7 @@
"SemanticChunkingConfiguration": "Settings for semantic document chunking for a data source. Semantic chunking splits a document into smaller documents based on groups of similar content derived from the text with natural language processing."
},
"AWS::Wisdom::KnowledgeBase CrawlerLimits": {
- "RateLimit": ""
+ "RateLimit": "The rate limit configured for the crawler."
},
"AWS::Wisdom::KnowledgeBase FixedSizeChunkingConfiguration": {
"MaxTokens": "The maximum number of tokens to include in a chunk.",
@@ -48259,7 +49259,7 @@
},
"AWS::Wisdom::MessageTemplate Tag": {
"Key": "",
- "Value": ""
+ "Value": "The value of the message template tag."
},
"AWS::Wisdom::MessageTemplateVersion": {
"MessageTemplateArn": "The Amazon Resource Name (ARN) of the message template.",
@@ -48307,7 +49307,6 @@
"Description": "The description of the pool.",
"DirectoryId": "The identifier of the directory used by the pool.",
"PoolName": "The name of the pool.",
- "Tags": "The tags for the pool.",
"TimeoutSettings": "The amount of time that a pool session remains active after users disconnect. If they try to reconnect to the pool session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new pool instance."
},
"AWS::WorkSpaces::WorkspacesPool ApplicationSettings": {
@@ -48317,10 +49316,6 @@
"AWS::WorkSpaces::WorkspacesPool Capacity": {
"DesiredUserSessions": "The desired number of user sessions for the WorkSpaces in the pool."
},
- "AWS::WorkSpaces::WorkspacesPool Tag": {
- "Key": "The key of the tag.",
- "Value": "The value of the tag."
- }, "AWS::WorkSpaces::WorkspacesPool TimeoutSettings": { "DisconnectTimeoutInSeconds": "Specifies the amount of time, in seconds, that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within the time set, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.", "IdleDisconnectTimeoutInSeconds": "The amount of time in seconds a connection will stay active while idle.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 09af736a3..4f72b7f92 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -27419,13 +27419,9 @@ "additionalProperties": false, "properties": { "AccessPointId": { - "markdownDescription": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", - "title": "AccessPointId", "type": "string" }, "Iam": { - "markdownDescription": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified.", - "title": "Iam", "type": "string" } }, @@ -27471,8 +27467,6 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.\n\n> This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.", - "title": "InstanceType", "type": "string" }, "JobRoleArn": { @@ -27683,28 +27677,18 @@ "additionalProperties": false, "properties": { "AuthorizationConfig": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig", - "markdownDescription": "The authorization configuration details for the Amazon EFS file system.", - "title": "AuthorizationConfig" + "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig" }, "FileSystemId": { - "markdownDescription": "The Amazon EFS file system ID to use.", - "title": "FileSystemId", "type": "string" }, "RootDirectory": { - "markdownDescription": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. 
The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", - "title": "RootDirectory", "type": "string" }, "TransitEncryption": { - "markdownDescription": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", - "title": "TransitEncryption", "type": "string" }, "TransitEncryptionPort": { - "markdownDescription": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* .", - "title": "TransitEncryptionPort", "type": "number" } }, @@ -28114,8 +28098,6 @@ "additionalProperties": false, "properties": { "Labels": { - "markdownDescription": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object.", - "title": "Labels", "type": "object" } }, @@ -28125,18 +28107,12 @@ "additionalProperties": false, "properties": { "ContainerPath": { - "markdownDescription": "The path on the container where the host volume is mounted.", - "title": "ContainerPath", "type": "string" }, "ReadOnly": { - "markdownDescription": "If this value is `true` , the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is `false` .", - "title": "ReadOnly", "type": "boolean" }, "SourceVolume": { - "markdownDescription": "The name of the volume to mount.", - "title": "SourceVolume", "type": "string" } }, @@ -28221,57 +28197,39 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.\n\n> This object is limited to 10 elements.", - "title": "Containers", "type": "array" }, "DnsPolicy": { - "markdownDescription": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", - "title": "DnsPolicy", "type": "string" }, "HostNetwork": { - "markdownDescription": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . 
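Taken together, the volume and mount-point properties above compose as follows inside a Batch job definition's container properties; a minimal sketch with hypothetical IDs. Note the constraints called out in the descriptions: when an access point is used, `TransitEncryption` must be `ENABLED` and `RootDirectory` must be omitted or set to `/`.

  "Volumes": [
    {
      "Name": "efs-volume",
      "EfsVolumeConfiguration": {
        "FileSystemId": "fs-12345678",
        "TransitEncryption": "ENABLED",
        "AuthorizationConfig": {
          "AccessPointId": "fsap-12345678",
          "Iam": "ENABLED"
        }
      }
    }
  ],
  "MountPoints": [
    {
      "SourceVolume": "efs-volume",
      "ContainerPath": "/mnt/efs",
      "ReadOnly": false
    }
  ]

Per the `Iam` description, setting `Iam` to `ENABLED` additionally requires that a `JobRoleArn` be specified on the container properties.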
Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", - "title": "HostNetwork", "type": "boolean" }, "ImagePullSecrets": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ImagePullSecret" }, - "markdownDescription": "", - "title": "ImagePullSecrets", "type": "array" }, "InitContainers": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements.", - "title": "InitContainers", "type": "array" }, "Metadata": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata", - "markdownDescription": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", - "title": "Metadata" + "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata" }, "ServiceAccountName": { - "markdownDescription": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", - "title": "ServiceAccountName", "type": "string" }, "ShareProcessNamespace": { - "markdownDescription": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", - "title": "ShareProcessNamespace", "type": "boolean" }, "Volumes": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" }, - "markdownDescription": "Specifies the volumes for a job definition that uses Amazon EKS resources.", - "title": "Volumes", "type": "array" } }, @@ -28496,8 +28454,6 @@ "additionalProperties": false, "properties": { "AttemptDurationSeconds": { - "markdownDescription": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. 
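For the EKS-flavored job definition, the pod-level settings above sit under the pod properties of the job definition's EKS properties; a compact sketch (the `EksProperties` wrapper, image, and service account name are illustrative assumptions):

  "EksProperties": {
    "PodProperties": {
      "HostNetwork": false,
      "DnsPolicy": "ClusterFirst",
      "ServiceAccountName": "batch-job-sa",
      "Containers": [
        {
          "Image": "public.ecr.aws/amazonlinux/amazonlinux:2",
          "Command": ["sleep", "60"]
        }
      ]
    }
  }

Per the `HostNetwork` description, egress-only workloads can keep the default of `true` and avoid the overhead of per-pod IP allocation.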
The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes.", - "title": "AttemptDurationSeconds", "type": "number" } }, @@ -28561,18 +28517,12 @@ "additionalProperties": false, "properties": { "EfsVolumeConfiguration": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration", - "markdownDescription": "This is used when you're using an Amazon Elastic File System file system for job storage. For more information, see [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the *AWS Batch User Guide* .", - "title": "EfsVolumeConfiguration" + "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration" }, "Host": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost", - "markdownDescription": "The contents of the `host` parameter determine whether your data volume persists on the host container instance and where it's stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", - "title": "Host" + "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost" }, "Name": { - "markdownDescription": "The name of the volume. It can be up to 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This name is referenced in the `sourceVolume` parameter of container definition `mountPoints` .", - "title": "Name", "type": "string" } }, @@ -28582,8 +28532,6 @@ "additionalProperties": false, "properties": { "SourcePath": { - "markdownDescription": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. Don't provide this for these jobs.", - "title": "SourcePath", "type": "string" } }, @@ -29742,7 +29690,7 @@ "type": "string" }, "Type": { - "markdownDescription": "Configure guardrail type when the PII entity is detected.\n\nThe following PIIs are used to block or mask sensitive information:\n\n- *General*\n\n- *ADDRESS*\n\nA physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.\n- *AGE*\n\nAn individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guarrails recognizes \"40 years\" as an age.\n- *NAME*\n\nAn individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. guardrails doesn't apply this entity type to names that are part of organizations or addresses. 
For example, guardrails recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n- *EMAIL*\n\nAn email address, such as *marymajor@email.com* .\n- *PHONE*\n\nA phone number. This entity type also includes fax and pager numbers.\n- *USERNAME*\n\nA user name that identifies an account, such as a login name, screen name, nick name, or handle.\n- *PASSWORD*\n\nAn alphanumeric string that is used as a password, such as \"* *very20special#pass** \".\n- *DRIVER_ID*\n\nThe number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.\n- *LICENSE_PLATE*\n\nA license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.\n- *VEHICLE_IDENTIFICATION_NUMBER*\n\nA Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the *ISO 3779* specification. Each country has specific codes and formats for VINs.\n- *Finance*\n\n- *REDIT_DEBIT_CARD_CVV*\n\nA three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.\n- *CREDIT_DEBIT_CARD_EXPIRY*\n\nThe expiration date for a credit or debit card. This number is usually four digits long and is often formatted as *month/year* or *MM/YY* . Guardrails recognizes expiration dates such as *01/21* , *01/2021* , and *Jan 2021* .\n- *CREDIT_DEBIT_CARD_NUMBER*\n\nThe number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.\n- *PIN*\n\nA four-digit personal identification number (PIN) with which you can access your bank account.\n- *INTERNATIONAL_BANK_ACCOUNT_NUMBER*\n\nAn International Bank Account Number has specific formats in each country. For more information, see [www.iban.com/structure](https://docs.aws.amazon.com/https://www.iban.com/structure) .\n- *SWIFT_CODE*\n\nA SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.\n\nSWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.\n- *IT*\n\n- *IP_ADDRESS*\n\nAn IPv4 address, such as *198.51.100.0* .\n- *MAC_ADDRESS*\n\nA *media access control* (MAC) address is a unique identifier assigned to a network interface controller (NIC).\n- *URL*\n\nA web address, such as *www.example.com* .\n- *AWS_ACCESS_KEY*\n\nA unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *AWS_SECRET_KEY*\n\nA unique identifier that's associated with an access key. 
You use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *USA specific*\n\n- *US_BANK_ACCOUNT_NUMBER*\n\nA US bank account number, which is typically 10 to 12 digits long.\n- *US_BANK_ROUTING_NUMBER*\n\nA US bank account routing number. These are typically nine digits long,\n- *US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER*\n\nA US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits.\n- *US_PASSPORT_NUMBER*\n\nA US passport number. Passport numbers range from six to nine alphanumeric characters.\n- *US_SOCIAL_SECURITY_NUMBER*\n\nA US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.\n- *Canada specific*\n\n- *CA_HEALTH_NUMBER*\n\nA Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.\n- *CA_SOCIAL_INSURANCE_NUMBER*\n\nA Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.\n\nThe SIN is formatted as three groups of three digits, such as *123-456-789* . A SIN can be validated through a simple check-digit process called the [Luhn algorithm](https://docs.aws.amazon.com/https://www.wikipedia.org/wiki/Luhn_algorithm) .\n- *UK Specific*\n\n- *UK_NATIONAL_HEALTH_SERVICE_NUMBER*\n\nA UK National Health Service Number is a 10-17 digit number, such as *485 777 3456* . The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.\n- *UK_NATIONAL_INSURANCE_NUMBER*\n\nA UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.\n\nThe number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits.\n- *UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER*\n\nA UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.\n- *Custom*\n\n- *Regex filter* - You can use a regular expressions to define patterns for a guardrail to recognize and act upon such as serial number, booking ID etc..", + "markdownDescription": "Configure guardrail type when the PII entity is detected.\n\nThe following PIIs are used to block or mask sensitive information:\n\n- *General*\n\n- *ADDRESS*\n\nA physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.\n- *AGE*\n\nAn individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guardrails recognizes \"40 years\" as an age.\n- *NAME*\n\nAn individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. guardrails doesn't apply this entity type to names that are part of organizations or addresses. 
For example, guardrails recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n- *EMAIL*\n\nAn email address, such as *marymajor@email.com* .\n- *PHONE*\n\nA phone number. This entity type also includes fax and pager numbers.\n- *USERNAME*\n\nA user name that identifies an account, such as a login name, screen name, nick name, or handle.\n- *PASSWORD*\n\nAn alphanumeric string that is used as a password, such as \"* *very20special#pass** \".\n- *DRIVER_ID*\n\nThe number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.\n- *LICENSE_PLATE*\n\nA license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.\n- *VEHICLE_IDENTIFICATION_NUMBER*\n\nA Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the *ISO 3779* specification. Each country has specific codes and formats for VINs.\n- *Finance*\n\n- *CREDIT_DEBIT_CARD_CVV*\n\nA three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.\n- *CREDIT_DEBIT_CARD_EXPIRY*\n\nThe expiration date for a credit or debit card. This number is usually four digits long and is often formatted as *month/year* or *MM/YY* . Guardrails recognizes expiration dates such as *01/21* , *01/2021* , and *Jan 2021* .\n- *CREDIT_DEBIT_CARD_NUMBER*\n\nThe number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.\n- *PIN*\n\nA four-digit personal identification number (PIN) with which you can access your bank account.\n- *INTERNATIONAL_BANK_ACCOUNT_NUMBER*\n\nAn International Bank Account Number has specific formats in each country. For more information, see [www.iban.com/structure](https://docs.aws.amazon.com/https://www.iban.com/structure) .\n- *SWIFT_CODE*\n\nA SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.\n\nSWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.\n- *IT*\n\n- *IP_ADDRESS*\n\nAn IPv4 address, such as *198.51.100.0* .\n- *MAC_ADDRESS*\n\nA *media access control* (MAC) address is a unique identifier assigned to a network interface controller (NIC).\n- *URL*\n\nA web address, such as *www.example.com* .\n- *AWS_ACCESS_KEY*\n\nA unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *AWS_SECRET_KEY*\n\nA unique identifier that's associated with an access key. 
You use the access key ID and secret access key to sign programmatic AWS requests cryptographically.\n- *USA specific*\n\n- *US_BANK_ACCOUNT_NUMBER*\n\nA US bank account number, which is typically 10 to 12 digits long.\n- *US_BANK_ROUTING_NUMBER*\n\nA US bank account routing number. These are typically nine digits long.\n- *US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER*\n\nA US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contains a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and fourth digits.\n- *US_PASSPORT_NUMBER*\n\nA US passport number. Passport numbers range from six to nine alphanumeric characters.\n- *US_SOCIAL_SECURITY_NUMBER*\n\nA US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.\n- *Canada specific*\n\n- *CA_HEALTH_NUMBER*\n\nA Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.\n- *CA_SOCIAL_INSURANCE_NUMBER*\n\nA Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.\n\nThe SIN is formatted as three groups of three digits, such as *123-456-789* . A SIN can be validated through a simple check-digit process called the [Luhn algorithm](https://docs.aws.amazon.com/https://www.wikipedia.org/wiki/Luhn_algorithm) .\n- *UK Specific*\n\n- *UK_NATIONAL_HEALTH_SERVICE_NUMBER*\n\nA UK National Health Service Number is a 10-17 digit number, such as *485 777 3456* . The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.\n- *UK_NATIONAL_INSURANCE_NUMBER*\n\nA UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.\n\nThe number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, fourth, and sixth digits.\n- *UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER*\n\nA UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.\n- *Custom*\n\n- *Regex filter* - You can use regular expressions to define patterns for a guardrail to recognize and act upon, such as a serial number or booking ID.", "title": "Type", "type": "string" } @@ -33226,7 +33174,7 @@ }, "TableReference": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.TableReference", - "markdownDescription": "The AWS Glue table that this configured table represents.", + "markdownDescription": "The table that this configured table represents.", "title": "TableReference" }, "Tags": { @@ -33900,7 +33848,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.", "title": "Tags", "type": "array" } @@ -34474,7 +34422,7 @@ "type": "string" }, "ConfigurationAlias": { - "markdownDescription": "Specifies the activated Hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. 
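As a sketch of how the PII entity types cataloged above are applied, a guardrail pairs each `Type` with an action; the `SensitiveInformationPolicyConfig` wrapper and the regex entry below are illustrative assumptions rather than text from this schema:

  "SensitiveInformationPolicyConfig": {
    "PiiEntitiesConfig": [
      { "Type": "US_SOCIAL_SECURITY_NUMBER", "Action": "ANONYMIZE" },
      { "Type": "CREDIT_DEBIT_CARD_NUMBER", "Action": "BLOCK" }
    ],
    "RegexesConfig": [
      { "Name": "booking-id", "Pattern": "BK-[0-9]{8}", "Action": "BLOCK" }
    ]
  }

The regex entry corresponds to the *Regex filter* option described under *Custom* at the end of the list above.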
Hook types currently support default configuration alias.", + "markdownDescription": "An alias by which to refer to this configuration data.\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", "title": "ConfigurationAlias", "type": "string" }, @@ -34561,7 +34509,7 @@ "title": "LoggingConfig" }, "SchemaHandlerPackage": { - "markdownDescription": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide for Extension Development* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That's, the user must have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the Amazon S3 bucket containing the Hook project package that contains the necessary files for the Hook you want to register.\n\nFor information on generating a schema handler package, see [Modeling custom CloudFormation Hooks](https://docs.aws.amazon.com/cloudformation-cli/latest/hooks-userguide/hooks-model.html) in the *AWS CloudFormation Hooks User Guide* .\n\n> To register the Hook, you must have `s3:GetObject` permissions to access the S3 objects.", "title": "SchemaHandlerPackage", "type": "string" }, @@ -34655,17 +34603,17 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The Amazon Resource Name (ARN) of the underlying AWS Lambda function that you want AWS CloudFormation to invoke when the macro is run.", + "markdownDescription": "The Amazon Resource Name (ARN) of the underlying Lambda function that you want CloudFormation to invoke when the macro is run.", "title": "FunctionName", "type": "string" }, "LogGroupName": { - "markdownDescription": "The CloudWatch Logs group to which AWS CloudFormation sends error logging information when invoking the macro's underlying AWS Lambda function.", + "markdownDescription": "The CloudWatch Logs group to which CloudFormation sends error logging information when invoking the macro's underlying Lambda function.", "title": "LogGroupName", "type": "string" }, "LogRoleARN": { - "markdownDescription": "The ARN of the role AWS CloudFormation should assume when sending log entries to CloudWatch Logs .", + "markdownDescription": "The ARN of the role CloudFormation should assume when sending log entries to CloudWatch Logs .", "title": "LogRoleARN", "type": "string" }, @@ -34816,7 +34764,7 @@ "type": "string" }, "ModulePackage": { - "markdownDescription": "A URL to the S3 bucket containing the package that contains the template fragment and schema files for the module version to register.\n\n> The user registering the module version must be able to access the module package in the S3 bucket. That's, the user needs to have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the package. 
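The macro properties above wire a template transform to its Lambda backend; a minimal sketch with hypothetical names and ARNs:

  "ExampleMacro": {
    "Type": "AWS::CloudFormation::Macro",
    "Properties": {
      "Name": "ExampleTransform",
      "FunctionName": "arn:aws:lambda:us-east-1:123456789012:function:example-transform",
      "LogGroupName": "/cloudformation/macros/example-transform",
      "LogRoleARN": "arn:aws:iam::123456789012:role/example-macro-logging"
    }
  }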
For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the S3 bucket containing the package that contains the template fragment and schema files for the module version to register.\n\nFor more information, see [Module structure and requirements](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/modules-structure.html) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .\n\n> To register the module version, you must have `s3:GetObject` permissions to access the S3 objects.", "title": "ModulePackage", "type": "string" } @@ -34889,7 +34837,7 @@ "type": "string" }, "LogDeliveryBucket": { - "markdownDescription": "The S3 bucket to which CloudFormation delivers the contract test execution logs.\n\nCloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of `PASSED` or `FAILED` .\n\nThe user initiating the stack operation must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:\n\n- GetObject\n- PutObject\n\nFor more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "The S3 bucket to which CloudFormation delivers the contract test execution logs.\n\nCloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of `PASSED` or `FAILED` .\n\nThe user initiating the stack operation must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:\n\n- s3:GetObject\n- s3:PutObject", "title": "LogDeliveryBucket", "type": "string" }, @@ -35112,7 +35060,7 @@ "additionalProperties": false, "properties": { "ExecutionRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource. If your resource calls AWS APIs in any of its handlers, you must create an *[IAM execution role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)* that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource. If your resource calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. 
When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.", "title": "ExecutionRoleArn", "type": "string" }, @@ -35122,7 +35070,7 @@ "title": "LoggingConfig" }, "SchemaHandlerPackage": { - "markdownDescription": "A URL to the S3 bucket containing the resource project package that contains the necessary files for the resource you want to register.\n\nFor information on generating a schema handler package for the resource you want to register, see [submit](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) in the *CloudFormation CLI User Guide* .\n\n> The user registering the resource must be able to access the package in the S3 bucket. That is, the user needs to have [GetObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) permissions for the schema handler package. For more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", + "markdownDescription": "A URL to the S3 bucket containing the resource project package that contains the necessary files for the resource you want to register.\n\nFor information on generating a schema handler package, see [Modeling resource types to use with AWS CloudFormation](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-model.html) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .\n\n> To register the resource version, you must have `s3:GetObject` permissions to access the S3 objects.", "title": "SchemaHandlerPackage", "type": "string" }, @@ -35238,7 +35186,7 @@ "type": "array" }, "TemplateURL": { - "markdownDescription": "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see [Template anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) .\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", + "markdownDescription": "The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket. The location for an Amazon S3 bucket must start with `https://` .\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", "title": "TemplateURL", "type": "string" }, @@ -35310,7 +35258,7 @@ "additionalProperties": false, "properties": { "AdministrationRoleARN": { - "markdownDescription": "The Amazon Resource Number (ARN) of the IAM role to use to create this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account.\n\nUse customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. 
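Pulling the registration properties above together, a private resource type registration might look like the following sketch; bucket, type, and role names are hypothetical, and the package must be readable via `s3:GetObject` as noted:

  "ExampleResourceVersion": {
    "Type": "AWS::CloudFormation::ResourceVersion",
    "Properties": {
      "TypeName": "MyOrg::MyService::MyResource",
      "SchemaHandlerPackage": "s3://example-bucket/my-resource-handler.zip",
      "ExecutionRoleArn": "arn:aws:iam::123456789012:role/my-resource-execution",
      "LoggingConfig": {
        "LogGroupName": "/cloudformation/types/my-resource",
        "LogRoleArn": "arn:aws:iam::123456789012:role/my-resource-logging"
      }
    }
  }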
For more information, see [Prerequisites: Granting Permissions for Stack Set Operations](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs.html) in the *AWS CloudFormation User Guide* .\n\n*Minimum* : `20`\n\n*Maximum* : `2048`", + "markdownDescription": "The Amazon Resource Number (ARN) of the IAM role to use to create this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account.\n\nUse customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see [Grant self-managed permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) in the *AWS CloudFormation User Guide* .\n\n*Minimum* : `20`\n\n*Maximum* : `2048`", "title": "AdministrationRoleARN", "type": "string" }, @@ -35328,7 +35276,7 @@ "items": { "type": "string" }, - "markdownDescription": "The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account \u2014for example, by creating new AWS Identity and Access Management ( IAM ) users. For more information, see [Acknowledging IAM Resources in AWS CloudFormation Templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities) .", + "markdownDescription": "The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your AWS account \u2014for example, by creating new IAM users. For more information, see [Acknowledging IAM resources in CloudFormation templates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/control-access-with-iam.html#using-iam-capabilities) in the *AWS CloudFormation User Guide* .", "title": "Capabilities", "type": "array" }, @@ -35338,7 +35286,7 @@ "type": "string" }, "ExecutionRoleName": { - "markdownDescription": "The name of the IAM execution role to use to create the stack set. If you don't specify an execution role, AWS CloudFormation uses the `AWSCloudFormationStackSetExecutionRole` role for the stack set operation.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `[a-zA-Z_0-9+=,.@-]+`", + "markdownDescription": "The name of the IAM execution role to use to create the stack set. If you don't specify an execution role, CloudFormation uses the `AWSCloudFormationStackSetExecutionRole` role for the stack set operation.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `[a-zA-Z_0-9+=,.@-]+`", "title": "ExecutionRoleName", "type": "string" }, @@ -35349,7 +35297,7 @@ }, "OperationPreferences": { "$ref": "#/definitions/AWS::CloudFormation::StackSet.OperationPreferences", - "markdownDescription": "The user-specified preferences for how AWS CloudFormation performs a stack set operation.", + "markdownDescription": "The user-specified preferences for how CloudFormation performs a stack set operation.", "title": "OperationPreferences" }, "Parameters": { @@ -35361,7 +35309,7 @@ "type": "array" }, "PermissionModel": { - "markdownDescription": "Describes how the IAM roles required for stack set operations are created.\n\n- With `SELF_MANAGED` permissions, you must create the administrator and execution roles required to deploy to target accounts. 
For more information, see [Grant Self-Managed Stack Set Permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) .\n- With `SERVICE_MANAGED` permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations .", + "markdownDescription": "Describes how the IAM roles required for stack set operations are created.\n\n- With `SELF_MANAGED` permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see [Grant self-managed permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) in the *AWS CloudFormation User Guide* .\n- With `SERVICE_MANAGED` permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations . For more information, see [Activate trusted access for stack sets with AWS Organizations](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-orgs-activate-trusted-access.html) in the *AWS CloudFormation User Guide* .", "title": "PermissionModel", "type": "string" }, @@ -35392,7 +35340,7 @@ "type": "string" }, "TemplateURL": { - "markdownDescription": "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to [Template Anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) in the AWS CloudFormation User Guide.\n\nConditional: You must specify only one of the following parameters: `TemplateBody` , `TemplateURL` .", + "markdownDescription": "The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with `https://` .\n\nConditional: You must specify only one of the following parameters: `TemplateBody` , `TemplateURL` .", "title": "TemplateURL", "type": "string" } @@ -35452,12 +35400,12 @@ "items": { "type": "string" }, - "markdownDescription": "The names of one or more AWS accounts for which you want to deploy stack set updates.\n\n*Pattern* : `^[0-9]{12}$`", + "markdownDescription": "The account IDs of the AWS accounts . If you have many account numbers, you can provide those accounts using the `AccountsUrl` property instead.\n\n*Pattern* : `^[0-9]{12}$`", "title": "Accounts", "type": "array" }, "AccountsUrl": { - "markdownDescription": "Returns the value of the `AccountsUrl` property.", + "markdownDescription": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. 
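The deployment-target fields above combine with a Region list inside a stack instances group; a brief sketch targeting one organizational unit (IDs hypothetical):

  "StackInstancesGroup": [
    {
      "DeploymentTargets": {
        "OrganizationalUnitIds": ["ou-abcd-1example"]
      },
      "Regions": ["us-east-1", "eu-west-1"]
    }
  ]

With `SELF_MANAGED` permissions you would list `Accounts` (or point `AccountsUrl` at a `.csv` or `.txt` file) instead of OU IDs.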
There is currently a 10MB limit for the data (approximately 800,000 accounts).", "title": "AccountsUrl", "type": "string" }, @@ -35465,7 +35413,7 @@ "items": { "type": "string" }, - "markdownDescription": "The organization root ID or organizational unit (OU) IDs to which StackSets deploys.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`", + "markdownDescription": "The organization root ID or organizational unit (OU) IDs.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`", "title": "OrganizationalUnitIds", "type": "array" } @@ -35487,12 +35435,12 @@ "additionalProperties": false, "properties": { "FailureToleranceCount": { - "markdownDescription": "The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` (but not both).", + "markdownDescription": "The number of accounts, per Region, for which this operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` (but not both).", "title": "FailureToleranceCount", "type": "number" }, "FailureTolerancePercentage": { - "markdownDescription": "The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nWhen calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds *down* to the next whole number.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` , but not both.", + "markdownDescription": "The percentage of accounts, per Region, for which this stack operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.\n\nWhen calculating the number of accounts based on the specified percentage, CloudFormation rounds *down* to the next whole number.\n\nConditional: You must specify either `FailureToleranceCount` or `FailureTolerancePercentage` , but not both.", "title": "FailureTolerancePercentage", "type": "number" }, @@ -35502,7 +35450,7 @@ "type": "number" }, "MaxConcurrentPercentage": { - "markdownDescription": "The maximum percentage of accounts in which to perform this operation at one time.\n\nWhen calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.\n\nNote that this setting lets you specify the *maximum* for operations. 
For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n\nConditional: You must specify either `MaxConcurrentCount` or `MaxConcurrentPercentage` , but not both.", + "markdownDescription": "The maximum percentage of accounts in which to perform this operation at one time.\n\nWhen calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.\n\nNote that this setting lets you specify the *maximum* for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.\n\nConditional: You must specify either `MaxConcurrentCount` or `MaxConcurrentPercentage` , but not both.", "title": "MaxConcurrentPercentage", "type": "number" }, @@ -35526,7 +35474,7 @@ "additionalProperties": false, "properties": { "ParameterKey": { - "markdownDescription": "The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that's specified in your template.", + "markdownDescription": "The key associated with the parameter. If you don't specify a key and value for a particular parameter, CloudFormation uses the default value that's specified in your template.", "title": "ParameterKey", "type": "string" }, @@ -35741,7 +35689,7 @@ "type": "number" }, "Handle": { - "markdownDescription": "A reference to the wait condition handle used to signal this wait condition. Use the `Ref` intrinsic function to specify an [`AWS::CloudFormation::WaitConditionHandle`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitconditionhandle.html) resource.\n\nAnytime you add a `WaitCondition` resource during a stack update, you must associate the wait condition with a new WaitConditionHandle resource. Don't reuse an old wait condition handle that has already been defined in the template. If you reuse a wait condition handle, the wait condition might evaluate old signals from a previous create or update stack command.\n\nUpdates aren't supported.", + "markdownDescription": "A reference to the wait condition handle used to signal this wait condition. Use the `Ref` intrinsic function to specify an [AWS::CloudFormation::WaitConditionHandle](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-waitconditionhandle.html) resource.\n\nAnytime you add a `WaitCondition` resource during a stack update, you must associate the wait condition with a new WaitConditionHandle resource. Don't reuse an old wait condition handle that has already been defined in the template. If you reuse a wait condition handle, the wait condition might evaluate old signals from a previous create or update stack command.\n\nUpdates aren't supported.", "title": "Handle", "type": "string" }, @@ -36625,7 +36573,7 @@ "type": "number" }, "OriginKeepaliveTimeout": { - "markdownDescription": "Specifies how long, in seconds, CloudFront persists its connection to the origin. 
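To see how the failure-tolerance and concurrency knobs described above interact, a sketch of stack set operation preferences (values illustrative):

  "OperationPreferences": {
    "RegionConcurrencyType": "SEQUENTIAL",
    "FailureTolerancePercentage": 10,
    "MaxConcurrentPercentage": 25
  }

With 37 target accounts in a Region, a 10 percent failure tolerance rounds down to 3 accounts, and a 25 percent maximum concurrency rounds down to 9 concurrent accounts (never below 1, per the rounding rules above).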
The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.\n\nFor more information, see [Origin Keep-alive Timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginKeepaliveTimeout) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.\n\nFor more information, see [Keep-alive timeout (custom origins only)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginKeepaliveTimeout) in the *Amazon CloudFront Developer Guide* .", "title": "OriginKeepaliveTimeout", "type": "number" }, @@ -36635,7 +36583,7 @@ "type": "string" }, "OriginReadTimeout": { - "markdownDescription": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the *origin response timeout* . The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.\n\nFor more information, see [Origin Response Timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the *origin response timeout* . The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.\n\nFor more information, see [Response timeout (custom origins only)](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout) in the *Amazon CloudFront Developer Guide* .", "title": "OriginReadTimeout", "type": "number" }, @@ -36832,7 +36780,7 @@ "title": "DefaultCacheBehavior" }, "DefaultRootObject": { - "markdownDescription": "The object that you want CloudFront to request from your origin (for example, `index.html` ) when a viewer requests the root URL for your distribution ( `https://www.example.com` ) instead of an object in your distribution ( `https://www.example.com/product-description.html` ). Specifying a default root object avoids exposing the contents of your distribution.\n\nSpecify only the object name, for example, `index.html` . Don't add a `/` before the object name.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Creating a Default Root Object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "When a viewer requests the root URL for your distribution, the default root object is the object that you want CloudFront to request from your origin. 
For example, if your root URL is `https://www.example.com` , you can specify CloudFront to return the `index.html` file as the default root object. You can specify a default root object so that viewers see a specific file or object, instead of another object in your distribution (for example, `https://www.example.com/product-description.html` ). A default root object avoids exposing the contents of your distribution.\n\nYou can specify the object name or a path to the object name (for example, `index.html` or `exampleFolderName/index.html` ). Your string can't begin with a forward slash ( `/` ). Only specify the object name or the path to the object.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Specify a default root object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", "title": "DefaultRootObject", "type": "string" }, @@ -39261,7 +39209,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
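Gathering the CloudFront origin and root-object settings above, an abridged distribution fragment follows; the domain and IDs are hypothetical, and a complete `DistributionConfig` also requires a default cache behavior, omitted here:

  "DistributionConfig": {
    "Enabled": true,
    "DefaultRootObject": "index.html",
    "Origins": [
      {
        "Id": "primary-origin",
        "DomainName": "origin.example.com",
        "CustomOriginConfig": {
          "OriginProtocolPolicy": "https-only",
          "OriginKeepaliveTimeout": 5,
          "OriginReadTimeout": 30
        }
      }
    ]
  }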
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n> Selectors don't support the use of wildcards like `*` . To match multiple values with a single condition, you may use `StartsWith` , `EndsWith` , `NotStartsWith` , or `NotEndsWith` to explicitly match the beginning or end of the event field. \n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39584,7 +39532,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` . The following additional fields are available for event data stores: `eventName` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` . The following additional fields are available for event data stores: `eventSource` , `eventType` , `sessionCredentialFromConsole` , and `userIdentity.arn` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n> Selectors don't support the use of wildcards like `*` . To match multiple values with a single condition, you may use `StartsWith` , `EndsWith` , `NotStartsWith` , or `NotEndsWith` to explicitly match the beginning or end of the event field. \n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. 
A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events, data events (for event data stores only), and network activity events.\n\nFor management events for trails, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor management and data events for event data stores, you can use it to include or exclude any event source and can use any operator.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events, management events (for event data stores only), and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For events outside of AWS , the value must be `ActivityAuditLog` .\n- *`eventType`* - This is an optional field available only for event data stores, which is used to filter management and data events on the event type. For information about available event types, see [CloudTrail record contents](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-record-contents.html#ct-event-type) in the *AWS CloudTrail user guide* .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`sessionCredentialFromConsole`* - This is an optional field available only for event data stores, which is used to filter management and data events based on whether the events originated from an AWS Management Console session. `sessionCredentialFromConsole` can only use the `Equals` and `NotEquals` operators.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. 
You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`userIdentity.arn`* - This is an optional field available only for event data stores, which is used to filter management and data events on the userIdentity ARN. You can use any operator with `userIdentity.arn` . For more information on the userIdentity element, see [CloudTrail userIdentity element](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html) in the *AWS CloudTrail User Guide* .\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -44075,7 +44023,7 @@ "additionalProperties": false, "properties": { "Category": { - "markdownDescription": "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.\n\n- `Source`\n- `Build`\n- `Test`\n- `Deploy`\n- `Invoke`\n- `Approval`", + "markdownDescription": "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.\n\n- `Source`\n- `Build`\n- `Test`\n- `Deploy`\n- `Invoke`\n- `Approval`\n- `Compute`", "title": "Category", "type": "string" }, @@ -45706,12 +45654,12 @@ "title": "CloudWatchLogsConfiguration" }, "EventSource": { - "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", + "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about threat-protection user activity in user pools with the Plus feature plan, set to `userAuthEvents` .", "title": "EventSource", "type": "string" }, "LogLevel": { - "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", + "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. 
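Taken together, the selector fields above translate into template code fairly directly. The following is a minimal sketch of an `AWS::CloudTrail::Trail` resource fragment that logs all S3 object-level data events for a single bucket, matching `resources.ARN` with `StartsWith` as described; the trail name, delivery bucket, and source bucket are illustrative placeholders.

```json
"S3DataEventTrail": {
  "Type": "AWS::CloudTrail::Trail",
  "Properties": {
    "IsLogging": true,
    "S3BucketName": "example-trail-delivery-bucket",
    "AdvancedEventSelectors": [
      {
        "Name": "Log S3 object events for one bucket",
        "FieldSelectors": [
          { "Field": "eventCategory", "Equals": [ "Data" ] },
          { "Field": "resources.type", "Equals": [ "AWS::S3::Object" ] },
          { "Field": "resources.ARN", "StartsWith": [ "arn:aws:s3:::example-source-bucket/" ] }
        ]
      }
    ]
  }
}
```

Note how `eventCategory` and `resources.type` use `Equals` , the only operator they support, while `resources.ARN` uses `StartsWith` with the bucket ARN so the selector covers every object in the bucket.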
To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/exporting-quotas-and-usage.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from threat protection with the Plus feature plan, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .",
       "title": "LogLevel",
       "type": "string"
     }
@@ -45767,7 +45715,7 @@
       "items": {
         "type": "string"
       },
-      "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .",
+      "markdownDescription": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* . For more information about alias attributes, see [Customizing sign-in attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#user-pool-settings-aliases) .",
       "title": "AliasAttributes",
       "type": "array"
     },
@@ -45775,7 +45723,7 @@
       "items": {
         "type": "string"
       },
-      "markdownDescription": "The attributes to be auto-verified. Possible values: *email* , *phone_number* .",
+      "markdownDescription": "The attributes that you want your user pool to automatically verify. Possible values: *email* , *phone_number* . For more information see [Verifying contact information at sign-up](https://docs.aws.amazon.com/cognito/latest/developerguide/signing-up-users-in-your-app.html#allowing-users-to-sign-up-and-confirm-themselves) .",
       "title": "AutoVerifiedAttributes",
       "type": "array"
     },
@@ -45786,7 +45734,7 @@
     },
     "DeviceConfiguration": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.DeviceConfiguration",
-      "markdownDescription": "The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.",
+      "markdownDescription": "The device-remembering configuration for a user pool. Device remembering or device tracking is a \"Remember me on this device\" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see [Working with user devices in your user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-device-tracking.html) . A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.",
       "title": "DeviceConfiguration"
     },
     "EmailConfiguration": {
@@ -45831,7 +45779,7 @@
       "items": {
         "$ref": "#/definitions/AWS::Cognito::UserPool.SchemaAttribute"
       },
-      "markdownDescription": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.",
+      "markdownDescription": "An array of attributes for the new user pool. You can add custom attributes and modify the properties of default attributes. The specifications in this parameter set the required attributes in your user pool. 
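As a rough sketch of how the `EventSource` and `LogLevel` pairs described above are wired up, the following hypothetical `AWS::Cognito::LogDeliveryConfiguration` fragment sends error-level `userNotification` logs to a CloudWatch log group; the user pool reference and log group ARN are assumptions.

```json
"ExampleLogDelivery": {
  "Type": "AWS::Cognito::LogDeliveryConfiguration",
  "Properties": {
    "UserPoolId": { "Ref": "ExampleUserPool" },
    "LogConfigurations": [
      {
        "EventSource": "userNotification",
        "LogLevel": "ERROR",
        "CloudWatchLogsConfiguration": {
          "LogGroupArn": "arn:aws:logs:us-east-1:111122223333:log-group:example-cognito-logs"
        }
      }
    ]
  }
}
```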
For more information, see [Working with user attributes](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html) .",
       "title": "Schema",
       "type": "array"
     },
@@ -45842,7 +45790,7 @@
     },
     "SmsConfiguration": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.SmsConfiguration",
-      "markdownDescription": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account .",
+      "markdownDescription": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account . For more information see [SMS message settings](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-sms-settings.html) .",
       "title": "SmsConfiguration"
     },
     "SmsVerificationMessage": {
@@ -45857,11 +45805,11 @@
     },
     "UserPoolAddOns": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.UserPoolAddOns",
-      "markdownDescription": "User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to `AUDIT` . To configure automatic security responses to risky traffic to your user pool, set to `ENFORCED` .\n\nFor more information, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) .",
+      "markdownDescription": "User pool add-ons. Contains settings for activation of threat protection. To log user security information but take no action, set to `AUDIT` . To configure automatic security responses to risky traffic to your user pool, set to `ENFORCED` .\n\nFor more information, see [Adding advanced security to a user pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) . To activate this setting, your user pool must be on the [Plus tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-plus.html) .",
       "title": "UserPoolAddOns"
     },
     "UserPoolName": {
-      "markdownDescription": "A string used to name the user pool.",
+      "markdownDescription": "A friendly name for your user pool.",
       "title": "UserPoolName",
       "type": "string"
     },
@@ -45886,7 +45834,7 @@
     },
     "UsernameConfiguration": {
       "$ref": "#/definitions/AWS::Cognito::UserPool.UsernameConfiguration",
-      "markdownDescription": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. 
For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) .", + "markdownDescription": "Sets the case sensitivity option for sign-in usernames. When `CaseSensitive` is `false` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `false` as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nWhen `CaseSensitive` is `true` (case sensitive), Amazon Cognito interprets `USERNAME` and `UserName` as distinct users.\n\nThis configuration is immutable after you set it.", "title": "UsernameConfiguration" }, "VerificationMessageTemplate": { @@ -46433,13 +46381,13 @@ "items": { "type": "string" }, - "markdownDescription": "The allowed OAuth scopes. Possible values provided by OAuth are `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.", + "markdownDescription": "The OAuth 2.0 scopes that you want to permit your app client to authorize. Scopes govern access control to user pool self-service API operations, user data from the `userInfo` endpoint, and third-party APIs. Possible values provided by OAuth are `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.", "title": "AllowedOAuthScopes", "type": "array" }, "AnalyticsConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolClient.AnalyticsConfiguration", - "markdownDescription": "The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.\n\n> In AWS Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in AWS Region us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.", + "markdownDescription": "The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.\n\nIn AWS Regions where Amazon Pinpoint isn't available, user pools might not have access to analytics or might be configurable with campaigns in the US East (N. Virginia) Region. 
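To show how several of the user pool properties documented above fit together (aliases, auto-verification, device remembering, add-ons, schema, and case-insensitive usernames), here is a minimal `AWS::Cognito::UserPool` fragment; every name and value is an illustrative assumption rather than a recommendation.

```json
"ExampleUserPool": {
  "Type": "AWS::Cognito::UserPool",
  "Properties": {
    "UserPoolName": "example-user-pool",
    "AliasAttributes": [ "email", "preferred_username" ],
    "AutoVerifiedAttributes": [ "email" ],
    "DeviceConfiguration": {
      "ChallengeRequiredOnNewDevice": true,
      "DeviceOnlyRememberedOnUserPrompt": true
    },
    "UserPoolAddOns": { "AdvancedSecurityMode": "AUDIT" },
    "UsernameConfiguration": { "CaseSensitive": false },
    "Schema": [
      { "Name": "email", "Required": true, "Mutable": true }
    ]
  }
}
```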
For more information, see [Using Amazon Pinpoint analytics](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-pinpoint-integration.html) .", "title": "AnalyticsConfiguration" }, "AuthSessionValidity": { @@ -46451,17 +46399,17 @@ "items": { "type": "string" }, - "markdownDescription": "A list of allowed redirect (callback) URLs for the IdPs.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", + "markdownDescription": "A list of allowed redirect (callback) URLs for the IdPs.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server. Amazon Cognito doesn't accept authorization requests with `redirect_uri` values that aren't in the list of `CallbackURLs` that you provide in this parameter.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", "title": "CallbackURLs", "type": "array" }, "ClientName": { - "markdownDescription": "The client name for the user pool client you would like to create.", + "markdownDescription": "A friendly name for the app client that you want to create.", "title": "ClientName", "type": "string" }, "DefaultRedirectURI": { - "markdownDescription": "The default redirect URI. In app clients with one assigned IdP, replaces `redirect_uri` in authentication requests. Must be in the `CallbackURLs` list.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nFor more information, see [Default redirect URI](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html#cognito-user-pools-app-idp-settings-about) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.", + "markdownDescription": "The default redirect URI. In app clients with one assigned IdP, replaces `redirect_uri` in authentication requests. Must be in the `CallbackURLs` list.", "title": "DefaultRedirectURI", "type": "string" }, @@ -46479,12 +46427,12 @@ "items": { "type": "string" }, - "markdownDescription": "The authentication flows that you want your user pool client to support. For each app client in your user pool, you can sign in your users with any combination of one or more flows, including with a user name and Secure Remote Password (SRP), a user name and password, or a custom authentication process that you define with Lambda functions.\n\n> If you don't specify a value for `ExplicitAuthFlows` , your user client supports `ALLOW_REFRESH_TOKEN_AUTH` , `ALLOW_USER_SRP_AUTH` , and `ALLOW_CUSTOM_AUTH` . \n\nValid values include:\n\n- `ALLOW_USER_AUTH` : Enable selection-based sign-in with `USER_AUTH` . This setting covers username-password, secure remote password (SRP), passwordless, and passkey authentication. 
This authentiation flow can do username-password and SRP authentication without other `ExplicitAuthFlows` permitting them. For example users can complete an SRP challenge through `USER_AUTH` without the flow `USER_SRP_AUTH` being active for the app client. This flow doesn't include `CUSTOM_AUTH` .\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, your app passes a user name and password to Amazon Cognito in the request, instead of using the Secure Remote Password (SRP) protocol to securely transmit the password.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.\n\nIn some environments, you will see the values `ADMIN_NO_SRP_AUTH` , `CUSTOM_AUTH_FLOW_ONLY` , or `USER_PASSWORD_AUTH` . You can't assign these legacy `ExplicitAuthFlows` values to user pool clients at the same time as values that begin with `ALLOW_` ,\nlike `ALLOW_USER_SRP_AUTH` .",
+      "markdownDescription": "The authentication flows that you want your user pool client to support. For each app client in your user pool, you can sign in your users with any combination of one or more flows, including with a user name and Secure Remote Password (SRP), a user name and password, or a custom authentication process that you define with Lambda functions.\n\n> If you don't specify a value for `ExplicitAuthFlows` , your user client supports `ALLOW_REFRESH_TOKEN_AUTH` , `ALLOW_USER_SRP_AUTH` , and `ALLOW_CUSTOM_AUTH` . \n\nValid values include:\n\n- `ALLOW_USER_AUTH` : Enable selection-based sign-in with `USER_AUTH` . This setting covers username-password, secure remote password (SRP), passwordless, and passkey authentication. This authentication flow can do username-password and SRP authentication without other `ExplicitAuthFlows` permitting them. For example, users can complete an SRP challenge through `USER_AUTH` without the flow `USER_SRP_AUTH` being active for the app client. This flow doesn't include `CUSTOM_AUTH` .\n\nTo activate this setting, your user pool must be in the [Essentials tier](https://docs.aws.amazon.com/cognito/latest/developerguide/feature-plans-features-essentials.html) or higher.\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, your app passes a user name and password to Amazon Cognito in the request, instead of using the Secure Remote Password (SRP) protocol to securely transmit the password.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.\n\nIn some environments, you will see the values `ADMIN_NO_SRP_AUTH` , `CUSTOM_AUTH_FLOW_ONLY` , or `USER_PASSWORD_AUTH` . 
You can't assign these legacy `ExplicitAuthFlows` values to user pool clients at the same time as values that begin with `ALLOW_` ,\nlike `ALLOW_USER_SRP_AUTH` .", "title": "ExplicitAuthFlows", "type": "array" }, "GenerateSecret": { - "markdownDescription": "Boolean to specify whether you want to generate a secret for the user pool client being created.", + "markdownDescription": "When `true` , generates a client secret for the app client. Client secrets are used with server-side and machine-to-machine applications. For more information, see [App client types](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html#user-pool-settings-client-app-client-types) .", "title": "GenerateSecret", "type": "boolean" }, @@ -46497,7 +46445,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of allowed logout URLs for the IdPs.", + "markdownDescription": "A list of allowed logout URLs for managed login authentication. For more information, see [Logout endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/logout-endpoint.html) .", "title": "LogoutURLs", "type": "array" }, @@ -46523,17 +46471,17 @@ "items": { "type": "string" }, - "markdownDescription": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .\n\nThis setting applies to providers that you can access with the [hosted UI and OAuth 2.0 authorization server](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-app-integration.html) . The removal of `COGNITO` from this list doesn't prevent authentication operations for local users with the user pools API in an AWS SDK. The only way to prevent API-based authentication is to block access with a [AWS WAF rule](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-waf.html) .", + "markdownDescription": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .\n\nThis setting applies to providers that you can access with [managed login](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-managed-login.html) . The removal of `COGNITO` from this list doesn't prevent authentication operations for local users with the user pools API in an AWS SDK. The only way to prevent API-based authentication is to block access with a [AWS WAF rule](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-waf.html) .", "title": "SupportedIdentityProviders", "type": "array" }, "TokenValidityUnits": { "$ref": "#/definitions/AWS::Cognito::UserPoolClient.TokenValidityUnits", - "markdownDescription": "The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours.", + "markdownDescription": "The units that validity times are represented in. 
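The app client fields above combine along these lines. This sketch assumes a public client (no secret) using SRP sign-in plus the authorization-code OAuth flow; the `ExampleUserPool` reference and the URLs are placeholders.

```json
"ExampleAppClient": {
  "Type": "AWS::Cognito::UserPoolClient",
  "Properties": {
    "UserPoolId": { "Ref": "ExampleUserPool" },
    "ClientName": "example-app-client",
    "GenerateSecret": false,
    "ExplicitAuthFlows": [ "ALLOW_USER_SRP_AUTH", "ALLOW_REFRESH_TOKEN_AUTH" ],
    "SupportedIdentityProviders": [ "COGNITO" ],
    "AllowedOAuthFlowsUserPoolClient": true,
    "AllowedOAuthFlows": [ "code" ],
    "AllowedOAuthScopes": [ "openid", "email" ],
    "CallbackURLs": [ "https://app.example.com/callback" ],
    "LogoutURLs": [ "https://app.example.com/logout" ],
    "TokenValidityUnits": { "AccessToken": "hours", "IdToken": "hours", "RefreshToken": "days" },
    "AccessTokenValidity": 1,
    "IdTokenValidity": 1,
    "RefreshTokenValidity": 30
  }
}
```

Because `TokenValidityUnits` sets hours for both tokens and days for refresh tokens, the three validity values read as 1 hour, 1 hour, and 30 days.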
The default unit for refresh tokens is days, and the default for ID and access tokens are hours.", "title": "TokenValidityUnits" }, "UserPoolId": { - "markdownDescription": "The user pool ID for the user pool where you want to create a user pool client.", + "markdownDescription": "The ID of the user pool where you want to create an app client.", "title": "UserPoolId", "type": "string" }, @@ -46749,12 +46697,12 @@ "additionalProperties": false, "properties": { "Description": { - "markdownDescription": "A string containing the description of the group.", + "markdownDescription": "A description of the group that you're creating.", "title": "Description", "type": "string" }, "GroupName": { - "markdownDescription": "The name of the group. Must be unique.", + "markdownDescription": "A name for the group. This name must be unique in your user pool.", "title": "GroupName", "type": "string" }, @@ -46764,12 +46712,12 @@ "type": "number" }, "RoleArn": { - "markdownDescription": "The role Amazon Resource Name (ARN) for the group.", + "markdownDescription": "The Amazon Resource Name (ARN) for the IAM role that you want to associate with the group. A group role primarily declares a preferred role for the credentials that you get from an identity pool. Amazon Cognito ID tokens have a `cognito:preferred_role` claim that presents the highest-precedence group that a user belongs to. Both ID and access tokens also contain a `cognito:groups` claim that list all the groups that a user is a member of.", "title": "RoleArn", "type": "string" }, "UserPoolId": { - "markdownDescription": "The user pool ID for the user pool.", + "markdownDescription": "The ID of the user pool where you want to create a user group.", "title": "UserPoolId", "type": "string" } @@ -46836,7 +46784,7 @@ "additionalProperties": false, "properties": { "AttributeMapping": { - "markdownDescription": "A mapping of IdP attributes to standard and custom user pool attributes.", + "markdownDescription": "A mapping of IdP attributes to standard and custom user pool attributes. Specify a user pool attribute as the key of the key-value pair, and the IdP attribute claim name as the value.", "title": "AttributeMapping", "type": "object" }, @@ -46844,7 +46792,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of IdP identifiers.", + "markdownDescription": "An array of IdP identifiers, for example `\"IdPIdentifiers\": [ \"MyIdP\", \"MyIdP2\" ]` . Identifiers are friendly names that you can pass in the `idp_identifier` query parameter of requests to the [Authorize endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/authorization-endpoint.html) to silently redirect to sign-in with the associated IdP. Identifiers in a domain format also enable the use of [email-address matching with SAML providers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-managing-saml-idp-naming.html) .", "title": "IdpIdentifiers", "type": "array" }, @@ -46854,17 +46802,17 @@ "type": "object" }, "ProviderName": { - "markdownDescription": "The IdP name.", + "markdownDescription": "The name that you want to assign to the IdP. 
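The group properties described above (name, description, precedence, and the preferred-role ARN) compose like the following sketch; `ExampleGroupRole` is an assumed IAM role defined elsewhere in the template.

```json
"ExampleAdminsGroup": {
  "Type": "AWS::Cognito::UserPoolGroup",
  "Properties": {
    "UserPoolId": { "Ref": "ExampleUserPool" },
    "GroupName": "Admins",
    "Description": "Administrators with an elevated preferred role",
    "Precedence": 0,
    "RoleArn": { "Fn::GetAtt": [ "ExampleGroupRole", "Arn" ] }
  }
}
```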
You can pass the identity provider name in the `identity_provider` query parameter of requests to the [Authorize endpoint](https://docs.aws.amazon.com/cognito/latest/developerguide/authorization-endpoint.html) to silently redirect to sign-in with the associated IdP.",
       "title": "ProviderName",
       "type": "string"
     },
     "ProviderType": {
-      "markdownDescription": "The IdP type.",
+      "markdownDescription": "The type of IdP that you want to add. Amazon Cognito supports OIDC, SAML 2.0, Login With Amazon, Sign In With Apple, Google, and Facebook IdPs.",
       "title": "ProviderType",
       "type": "string"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID.",
+      "markdownDescription": "The ID of the user pool where you want to create an IdP.",
       "title": "UserPoolId",
       "type": "string"
     }
@@ -46951,7 +46899,7 @@
       "type": "array"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool.",
+      "markdownDescription": "The ID of the user pool where you want to create a resource server.",
       "title": "UserPoolId",
       "type": "string"
     }
@@ -47319,7 +47267,7 @@
       "type": "string"
     },
     "UserPoolId": {
-      "markdownDescription": "The user pool ID for the user pool.",
+      "markdownDescription": "The ID of the user pool.",
       "title": "UserPoolId",
       "type": "string"
     }
@@ -47388,7 +47336,7 @@
     "properties": {
       "ClientMetadata": {
         "additionalProperties": true,
-        "markdownDescription": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing user pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:\n> \n> - Store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.\n> - Validate the ClientMetadata value.\n> - Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.",
+        "markdownDescription": "A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `ClientMetadata` attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. 
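Pulling the IdP fields above into one place, a sketch of an `AWS::Cognito::UserPoolIdentityProvider` for a hypothetical SAML provider might look like the following; the metadata URL, provider name, and identifier are placeholder assumptions.

```json
"ExampleSamlIdP": {
  "Type": "AWS::Cognito::UserPoolIdentityProvider",
  "Properties": {
    "UserPoolId": { "Ref": "ExampleUserPool" },
    "ProviderName": "MySAMLIdP",
    "ProviderType": "SAML",
    "ProviderDetails": {
      "MetadataURL": "https://idp.example.com/saml/metadata.xml"
    },
    "AttributeMapping": { "email": "emailAddress" },
    "IdpIdentifiers": [ "corp.example.com" ]
  }
}
```

Per the `AttributeMapping` description, the key ( `email` ) is the user pool attribute and the value ( `emailAddress` ) is the IdP claim name.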
In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Using Lambda triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> When you use the `ClientMetadata` parameter, note that Amazon Cognito won't do the following:\n> \n> - Store the `ClientMetadata` value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the `ClientMetadata` parameter serves no purpose.\n> - Validate the `ClientMetadata` value.\n> - Encrypt the `ClientMetadata` value. Don't send sensitive information in this parameter.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -47401,17 +47349,17 @@ "items": { "type": "string" }, - "markdownDescription": "Specify `\"EMAIL\"` if email will be used to send the welcome message. Specify `\"SMS\"` if the phone number will be used. The default value is `\"SMS\"` . You can specify more than one value.", + "markdownDescription": "Specify `EMAIL` if email will be used to send the welcome message. Specify `SMS` if the phone number will be used. The default value is `SMS` . You can specify more than one value.", "title": "DesiredDeliveryMediums", "type": "array" }, "ForceAliasCreation": { - "markdownDescription": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", + "markdownDescription": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the `UserAttributes` parameter already exists as an alias with a different user, this request migrates the alias from the previous user to the newly-created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .", "title": "ForceAliasCreation", "type": "boolean" }, "MessageAction": { - "markdownDescription": "Set to `RESEND` to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to `SUPPRESS` to suppress sending the message. You can specify only one value.", + "markdownDescription": "Set to `RESEND` to resend the invitation message to a user that already exists, and to reset the temporary-password duration with a new temporary password. Set to `SUPPRESS` to suppress sending the message. 
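As a sketch of the user-creation fields just described, the following hypothetical `AWS::Cognito::UserPoolUser` invites a user by email; the username, attribute values, and metadata key are illustrative only.

```json
"ExampleInvitedUser": {
  "Type": "AWS::Cognito::UserPoolUser",
  "Properties": {
    "UserPoolId": { "Ref": "ExampleUserPool" },
    "Username": "jdoe",
    "DesiredDeliveryMediums": [ "EMAIL" ],
    "ForceAliasCreation": false,
    "UserAttributes": [
      { "Name": "email", "Value": "jdoe@example.com" },
      { "Name": "email_verified", "Value": "true" }
    ],
    "ClientMetadata": { "invitedBy": "admin-console" }
  }
}
```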
You can specify only one value.", "title": "MessageAction", "type": "string" }, @@ -47424,7 +47372,7 @@ "type": "array" }, "UserPoolId": { - "markdownDescription": "The user pool ID for the user pool where the user will be created.", + "markdownDescription": "The ID of the user pool where you want to create a user.", "title": "UserPoolId", "type": "string" }, @@ -47525,7 +47473,7 @@ "type": "string" }, "UserPoolId": { - "markdownDescription": "The user pool ID for the user pool.", + "markdownDescription": "The ID of the user pool that contains the group that you want to add the user to.", "title": "UserPoolId", "type": "string" }, @@ -50995,7 +50943,7 @@ "type": "string" }, "SourcePhoneNumberArn": { - "markdownDescription": "The claimed phone number ARN that was previously imported from the external service, such as Amazon Pinpoint. If it is from Amazon Pinpoint, it looks like the ARN of the phone number that was imported from Amazon Pinpoint.", + "markdownDescription": "The claimed phone number ARN that was previously imported from the external service, such as AWS End User Messaging. If it is from AWS End User Messaging, it looks like the ARN of the phone number that was imported from AWS End User Messaging.", "title": "SourcePhoneNumberArn", "type": "string" }, @@ -52855,12 +52803,12 @@ "type": "string" }, "FirstName": { - "markdownDescription": "The first name. This is required if you are using Amazon Connect or SAML for identity management.", + "markdownDescription": "The first name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) are not accepted.", "title": "FirstName", "type": "string" }, "LastName": { - "markdownDescription": "The last name. This is required if you are using Amazon Connect or SAML for identity management.", + "markdownDescription": "The last name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) are not accepted.", "title": "LastName", "type": "string" }, @@ -55584,7 +55532,7 @@ "type": "string" }, "Location": { - "markdownDescription": "*[Custom snapshot policies only]* Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify `CLOUD` . To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL` . If you omit this parameter, `CLOUD` is used by default.\n\nIf the policy targets resources in an AWS Region , then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost.", + "markdownDescription": "*[Custom snapshot policies only]* Specifies the destination for snapshots created by the policy. 
The allowed destinations depend on the location of the targeted resources.\n\n- If the policy targets resources in a Region, then you must create snapshots in the same Region as the source resource.\n- If the policy targets resources in a Local Zone, you can create snapshots in the same Local Zone or in its parent Region.\n- If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost or in its parent Region.\n\nSpecify one of the following values:\n\n- To create snapshots in the same Region as the source resource, specify `CLOUD` .\n- To create snapshots in the same Local Zone as the source resource, specify `LOCAL_ZONE` .\n- To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL` .\n\nDefault: `CLOUD`", "title": "Location", "type": "string" }, @@ -55961,7 +55909,7 @@ "items": { "type": "string" }, - "markdownDescription": "*[Custom snapshot and AMI policies only]* The location of the resources to backup. If the source resources are located in an AWS Region , specify `CLOUD` . If the source resources are located on an Outpost in your account, specify `OUTPOST` .\n\nIf you specify `OUTPOST` , Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account.", + "markdownDescription": "*[Custom snapshot and AMI policies only]* The location of the resources to backup.\n\n- If the source resources are located in a Region, specify `CLOUD` . In this case, the policy targets all resources of the specified type with matching target tags across all Availability Zones in the Region.\n- *[Custom snapshot policies only]* If the source resources are located in a Local Zone, specify `LOCAL_ZONE` . In this case, the policy targets all resources of the specified type with matching target tags across all Local Zones in the Region.\n- If the source resources are located on an Outpost in your account, specify `OUTPOST` . In this case, the policy targets all resources of the specified type with matching target tags across all of the Outposts in your account.", "title": "ResourceLocations", "type": "array" }, @@ -56066,7 +56014,7 @@ "items": { "$ref": "#/definitions/AWS::DLM::LifecyclePolicy.CrossRegionCopyRule" }, - "markdownDescription": "Specifies a rule for copying snapshots or AMIs across regions.\n\n> You can't specify cross-Region copy rules for policies that create snapshots on an Outpost. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.", + "markdownDescription": "Specifies a rule for copying snapshots or AMIs across Regions.\n\n> You can't specify cross-Region copy rules for policies that create snapshots on an Outpost or in a Local Zone. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.", "title": "CrossRegionCopyRules", "type": "array" }, @@ -61710,7 +61658,7 @@ "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). 
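To make the `Location` and `ResourceLocations` rules above concrete, here is a minimal sketch of an `AWS::DLM::LifecyclePolicy` that targets tagged volumes in a Region and keeps snapshots in the same Region ( `CLOUD` ); `DlmExecutionRole` and the tag values are assumptions for the sketch.

```json
"DailySnapshotPolicy": {
  "Type": "AWS::DLM::LifecyclePolicy",
  "Properties": {
    "Description": "Daily snapshots for tagged volumes",
    "State": "ENABLED",
    "ExecutionRoleArn": { "Fn::GetAtt": [ "DlmExecutionRole", "Arn" ] },
    "PolicyDetails": {
      "PolicyType": "EBS_SNAPSHOT_MANAGEMENT",
      "ResourceTypes": [ "VOLUME" ],
      "ResourceLocations": [ "CLOUD" ],
      "TargetTags": [ { "Key": "Backup", "Value": "daily" } ],
      "Schedules": [
        {
          "Name": "DailySnapshots",
          "CreateRule": { "Location": "CLOUD", "Interval": 24, "IntervalUnit": "HOURS", "Times": [ "03:00" ] },
          "RetainRule": { "Count": 7 }
        }
      ]
    }
  }
}
```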
You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", + "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location).\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "title": "Subdirectory", "type": "string" }, @@ -61808,7 +61756,7 @@ "additionalProperties": false, "properties": { "FsxFilesystemArn": { - "markdownDescription": "The Amazon Resource Name (ARN) for the FSx for Lustre file system.", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system.", "title": "FsxFilesystemArn", "type": "string" }, @@ -61821,7 +61769,7 @@ "type": "array" }, "Subdirectory": { - "markdownDescription": "A subdirectory in the location's path. This subdirectory in the FSx for Lustre file system is used to read data from the FSx for Lustre source location or write data to the FSx for Lustre destination.", + "markdownDescription": "Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.\n\nWhen the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory ( `/` ).", "title": "Subdirectory", "type": "string" }, @@ -61829,7 +61777,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.", + "markdownDescription": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location.", "title": "Tags", "type": "array" } @@ -61914,7 +61862,7 @@ "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a path to the file share in the SVM where you'll copy your data.\n\nYou can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be `/vol1` , `/vol1/tree1` , or `/share1` .\n\n> Don't specify a junction path in the SVM's root volume. For more information, see [Managing FSx for ONTAP storage virtual machines](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html) in the *Amazon FSx for NetApp ONTAP User Guide* .", + "markdownDescription": "Specifies a path to the file share in the SVM where you want to transfer data to or from.\n\nYou can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be `/vol1` , `/vol1/tree1` , or `/share1` .\n\n> Don't specify a junction path in the SVM's root volume. 
For more information, see [Managing FSx for ONTAP storage virtual machines](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-svms.html) in the *Amazon FSx for NetApp ONTAP User Guide* .", "title": "Subdirectory", "type": "string" }, @@ -61999,7 +61947,7 @@ "additionalProperties": false, "properties": { "Domain": { - "markdownDescription": "Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to.\n\nIf you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM.", + "markdownDescription": "Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.", "title": "Domain", "type": "string" }, @@ -62203,7 +62151,7 @@ "additionalProperties": false, "properties": { "Domain": { - "markdownDescription": "Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.", + "markdownDescription": "Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.", "title": "Domain", "type": "string" }, @@ -71794,7 +71742,7 @@ "type": "string" }, "PropagateTagsToVolumeOnCreation": { - "markdownDescription": "Indicates whether to assign the tags from the instance to all of the volumes attached to the instance at launch. If you specify `true` and you assign tags to the instance, those tags are automatically assigned to all of the volumes that you attach to the instance at launch. If you specify `false` , those tags are not assigned to the attached volumes.", + "markdownDescription": "Indicates whether to assign the tags specified in the `Tags` property to the volumes specified in the `BlockDeviceMappings` property.\n\nNote that using this feature does not assign the tags to volumes that are created separately and then attached using `AWS::EC2::VolumeAttachment` .", "title": "PropagateTagsToVolumeOnCreation", "type": "boolean" }, @@ -76958,7 +76906,7 @@ "type": "string" }, "GroupName": { - "markdownDescription": "The name of the security group.\n\nConstraints: Up to 255 characters in length. Cannot start with `sg-` .\n\nValid characters: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*", + "markdownDescription": "[Default VPC] The name of the security group. For security groups for a default VPC you can specify either the ID or the name of the security group. For security groups for a nondefault VPC, you must specify the ID of the security group.", "title": "GroupName", "type": "string" }, @@ -77418,7 +77366,7 @@ "type": "array" }, "SecondaryPrivateIpAddressCount": { - "markdownDescription": "The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option. 
You cannot specify this option if you're launching more than one instance in a [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) request.", + "markdownDescription": "The number of secondary private IPv4 addresses. You can\u2019t specify this parameter and also specify a secondary private IP address using the `PrivateIpAddress` parameter.", "title": "SecondaryPrivateIpAddressCount", "type": "number" }, @@ -83122,7 +83070,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::Cluster.ClusterSettings" }, - "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.", + "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights with enhanced observability or CloudWatch Container Insights for a cluster.\n\nContainer Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays this critical performance data in curated dashboards, removing the heavy lifting from observability setup.\n\nFor more information, see [Monitor Amazon ECS containers using Container Insights with enhanced observability](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "ClusterSettings", "type": "array" }, @@ -83216,7 +83164,7 @@ "type": "string" }, "Value": { - "markdownDescription": "The value to set for the cluster setting. The supported values are `enabled` and `disabled` .\n\nIf you set `name` to `containerInsights` and `value` to `enabled` , CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the `containerInsights` account setting is turned on. If a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", + "markdownDescription": "The value to set for the cluster setting. The supported values are `enhanced` , `enabled` , and `disabled` .\n\nTo use Container Insights with enhanced observability, set the `containerInsights` account setting to `enhanced` .\n\nTo use Container Insights, set the `containerInsights` account setting to `enabled` .\n\nIf a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", "title": "Value", "type": "string" } @@ -83513,7 +83461,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::Service.CapacityProviderStrategyItem" }, - "markdownDescription": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted. 
If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy may contain a maximum of 6 capacity providers.", + "markdownDescription": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted. If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy can contain a maximum of 20 capacity providers.", "title": "CapacityProviderStrategy", "type": "array" }, @@ -83672,7 +83620,7 @@ "additionalProperties": false, "properties": { "AssignPublicIp": { - "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", + "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `ENABLED` .", "title": "AssignPublicIp", "type": "string" }, @@ -83779,7 +83727,7 @@ "title": "DeploymentCircuitBreaker" }, "MaximumPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. 
For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the service uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.", "title": "MaximumPercent", "type": "number" }, @@ -83865,7 +83813,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. 
Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. 
For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. That way, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. 
The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, @@ -84789,7 +84737,7 @@ "additionalProperties": false, "properties": { "SizeInGiB": { - "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. 
The minimum supported value is `20` GiB and the maximum supported value is `200` GiB.", + "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB.", "title": "SizeInGiB", "type": "number" } @@ -85035,7 +84983,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. 
The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. 
When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. That way, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. 
A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. 
Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` . For more information, see [Under the hood: FireLens for Amazon ECS Tasks](https://docs.aws.amazon.com/containers/under-the-hood-firelens-for-amazon-ecs-tasks/) .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, @@ -85481,7 +85429,7 @@ "additionalProperties": false, "properties": { "AssignPublicIp": { - "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", + "markdownDescription": "Whether the task's elastic network interface receives a public IP address. The default value is `ENABLED` .", "title": "AssignPublicIp", "type": "string" }, @@ -85924,7 +85872,7 @@ "additionalProperties": false, "properties": { "ReplicationOverwriteProtection": { - "markdownDescription": "The status of the file system's replication overwrite protection.\n\n- `ENABLED` \u2013 The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. 
Replication overwrite protection is `ENABLED` by default.\n- `DISABLED` \u2013 The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.\n- `REPLICATING` \u2013 The file system is being used as the destination file system in a replication configuration. The file system is read-only and is only modified only by EFS replication.\n\nIf the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, the file system becomes writeable.", + "markdownDescription": "The status of the file system's replication overwrite protection.\n\n- `ENABLED` \u2013 The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is `ENABLED` by default.\n- `DISABLED` \u2013 The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.\n- `REPLICATING` \u2013 The file system is being used as the destination file system in a replication configuration. The file system is read-only and is modified only by EFS replication.\n\nIf the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, and the file system becomes writeable.", "title": "ReplicationOverwriteProtection", "type": "string" } @@ -86517,7 +86465,7 @@ "additionalProperties": false, "properties": { "IpFamily": { - "markdownDescription": "Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both `IPv4` and `IPv6` CIDR blocks assigned to them. You can't specify `ipv6` for clusters in China Regions.\n\nYou can only specify `ipv6` for `1.21` and later clusters that use version `1.10.1` or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements listed in the considerations listed in [Assigning IPv6 addresses to pods and services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the Amazon EKS User Guide. Kubernetes assigns services `IPv6` addresses from the unique local address range `(fc00::/7)` . You can't specify a custom `IPv6` CIDR block. Pod addresses are assigned from the subnet's `IPv6` CIDR.", + "markdownDescription": "Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both `IPv4` and `IPv6` CIDR blocks assigned to them. You can't specify `ipv6` for clusters in China Regions.\n\nYou can only specify `ipv6` for `1.21` and later clusters that use version `1.10.1` or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements and considerations listed in [Assigning IPv6 addresses to pods and services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the *Amazon EKS User Guide* . Kubernetes assigns services `IPv6` addresses from the unique local address range `(fc00::/7)` . You can't specify a custom `IPv6` CIDR block. Pod addresses are assigned from the subnet's `IPv6` CIDR.", "title": "IpFamily", "type": "string" }, @@ -94331,7 +94279,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . 
The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). 
The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` .\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. 
The value you set for both attributes must be the same.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deregistration_delay.timeout_seconds` - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from `draining` to `unused` . The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.\n- `stickiness.enabled` - Indicates whether target stickiness is enabled. The value is `true` or `false` . The default is `false` .\n- `stickiness.type` - Indicates the type of stickiness. The possible values are:\n\n- `lb_cookie` and `app_cookie` for Application Load Balancers.\n- `source_ip` for Network Load Balancers.\n- `source_ip_dest_ip` and `source_ip_dest_ip_proto` for Gateway Load Balancers.\n\nThe following attributes are supported by Application Load Balancers and Network Load Balancers:\n\n- `load_balancing.cross_zone.enabled` - Indicates whether cross zone load balancing is enabled. The value is `true` , `false` or `use_load_balancer_configuration` . The default is `use_load_balancer_configuration` .\n- `target_group_health.dns_failover.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to the maximum number of targets. The default is `off` .\n- `target_group_health.dns_failover.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.count` - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.\n- `target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage` - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from 1 to 100. The default is `off` .\n\nThe following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:\n\n- `load_balancing.algorithm.type` - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is `round_robin` , `least_outstanding_requests` , or `weighted_random` . The default is `round_robin` .\n- `load_balancing.algorithm.anomaly_mitigation` - Only available when `load_balancing.algorithm.type` is `weighted_random` . Indicates whether anomaly mitigation is enabled. The value is `on` or `off` . The default is `off` .\n- `slow_start.duration_seconds` - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). 
The default is 0 seconds (disabled).\n- `stickiness.app_cookie.cookie_name` - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: `AWSALB` , `AWSALBAPP` , and `AWSALBTG` ; they're reserved for use by the load balancer.\n- `stickiness.app_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n- `stickiness.lb_cookie.duration_seconds` - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).\n\nThe following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:\n\n- `lambda.multi_value_headers.enabled` - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is `true` or `false` . The default is `false` . If the value is `false` and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.\n\nThe following attributes are supported only by Network Load Balancers:\n\n- `deregistration_delay.connection_termination.enabled` - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is `true` or `false` . For new UDP/TCP_UDP target groups the default is `true` . Otherwise, the default is `false` .\n- `preserve_client_ip.enabled` - Indicates whether client IP preservation is enabled. The value is `true` or `false` . The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.\n- `proxy_protocol_v2.enabled` - Indicates whether Proxy Protocol version 2 is enabled. The value is `true` or `false` . The default is `false` .\n- `target_health_state.unhealthy.connection_termination.enabled` - Indicates whether the load balancer terminates connections to unhealthy targets. The value is `true` or `false` . The default is `true` . This attribute can't be enabled for UDP and TCP_UDP target groups.\n- `target_health_state.unhealthy.draining_interval_seconds` - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from `unhealthy.draining` to `unhealthy` . The range is 0-360000 seconds. The default value is 0 seconds.\n\nNote: This attribute can only be configured when `target_health_state.unhealthy.connection_termination.enabled` is `false` .\n\nThe following attributes are supported only by Gateway Load Balancers:\n\n- `target_failover.on_deregistration` - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. 
The value you set for both attributes must be the same.\n- `target_failover.on_unhealthy` - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are `rebalance` and `no_rebalance` . The default is `no_rebalance` . The two attributes ( `target_failover.on_deregistration` and `target_failover.on_unhealthy` ) can't be set independently. The value you set for both attributes must be the same.", "title": "Key", "type": "string" }, @@ -96506,7 +96454,7 @@ "properties": { "AuthParameters": { "$ref": "#/definitions/AWS::Events::Connection.AuthParameters", - "markdownDescription": "A `CreateConnectionAuthRequestParameters` object that contains the authorization parameters to use to authorize with the endpoint.", + "markdownDescription": "The authorization parameters to use to authorize with the endpoint.\n\nYou must include only authorization parameters for the `AuthorizationType` you specify.", "title": "AuthParameters" }, "AuthorizationType": { @@ -96640,7 +96588,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Connection.Parameter" }, - "markdownDescription": "Contains additional body string parameters for the connection.", + "markdownDescription": "Any additional body string parameters for the connection.", "title": "BodyParameters", "type": "array" }, @@ -96648,7 +96596,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Connection.Parameter" }, - "markdownDescription": "Contains additional header parameters for the connection.", + "markdownDescription": "Any additional header parameters for the connection.", "title": "HeaderParameters", "type": "array" }, @@ -96656,7 +96604,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Connection.Parameter" }, - "markdownDescription": "Contains additional query string parameters for the connection.", + "markdownDescription": "Any additional query string parameters for the connection.", "title": "QueryStringParameters", "type": "array" } @@ -96673,7 +96621,7 @@ }, "ClientParameters": { "$ref": "#/definitions/AWS::Events::Connection.ClientParameters", - "markdownDescription": "A `CreateConnectionOAuthClientRequestParameters` object that contains the client parameters for OAuth authorization.", + "markdownDescription": "The client parameters for OAuth authorization.", "title": "ClientParameters" }, "HttpMethod": { @@ -96683,7 +96631,7 @@ }, "OAuthHttpParameters": { "$ref": "#/definitions/AWS::Events::Connection.ConnectionHttpParameters", - "markdownDescription": "A `ConnectionHttpParameters` object that contains details about the additional parameters to use for the connection.", + "markdownDescription": "Details about the additional parameters to use for the connection.", "title": "OAuthHttpParameters" } }, @@ -97651,12 +97599,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "Name of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Name of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "Value of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Value of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Value", "type": "string" } @@ -97674,7 +97622,7 @@ "items": { "$ref": "#/definitions/AWS::Events::Rule.SageMakerPipelineParameter" }, - "markdownDescription": "List of Parameter names and values for SageMaker Model Building Pipeline 
execution.", + "markdownDescription": "List of Parameter names and values for SageMaker AI Model Building Pipeline execution.", "title": "PipelineParameterList", "type": "array" } @@ -97760,7 +97708,7 @@ }, "RetryPolicy": { "$ref": "#/definitions/AWS::Events::Rule.RetryPolicy", - "markdownDescription": "The `RetryPolicy` object that contains the retry policy configuration to use for the dead-letter queue.", + "markdownDescription": "The retry policy configuration to use for the dead-letter queue.", "title": "RetryPolicy" }, "RoleArn": { @@ -97775,7 +97723,7 @@ }, "SageMakerPipelineParameters": { "$ref": "#/definitions/AWS::Events::Rule.SageMakerPipelineParameters", - "markdownDescription": "Contains the SageMaker Model Building Pipeline parameters to start execution of a SageMaker Model Building Pipeline.\n\nIf you specify a SageMaker Model Building Pipeline as a target, you can use this to specify parameters to start a pipeline execution based on EventBridge events.", + "markdownDescription": "Contains the SageMaker AI Model Building Pipeline parameters to start execution of a SageMaker AI Model Building Pipeline.\n\nIf you specify a SageMaker AI Model Building Pipeline as a target, you can use this to specify parameters to start a pipeline execution based on EventBridge events.", "title": "SageMakerPipelineParameters" }, "SqsParameters": { @@ -99672,7 +99620,7 @@ "type": "boolean" }, "DataRepositoryPath": { - "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://bucket-name/prefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", + "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://myBucket/myPrefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", "title": "DataRepositoryPath", "type": "string" }, @@ -99867,7 +99815,7 @@ "type": "number" }, "StorageType": { - "markdownDescription": "Sets the storage type for the file system that you're creating. Valid values are `SSD` and `HDD` .\n\n- Set to `SSD` to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.\n- Set to `HDD` to use hard disk drive storage. HDD is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types, and on `PERSISTENT_1` Lustre file system deployment types.\n\nDefault value is `SSD` . For more information, see [Storage type options](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) in the *FSx for Windows File Server User Guide* and [Multiple storage options](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) in the *FSx for Lustre User Guide* .", + "markdownDescription": "Sets the storage class for the file system that you're creating. Valid values are `SSD` , `HDD` , and `INTELLIGENT_TIERING` .\n\n- Set to `SSD` to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.\n- Set to `HDD` to use hard disk drive storage. HDD is supported on `SINGLE_AZ_2` and `MULTI_AZ_1` Windows file system deployment types, and on `PERSISTENT_1` Lustre file system deployment types.\n- Set to `INTELLIGENT_TIERING` to use fully elastic, intelligently-tiered storage. 
Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type.\n\nDefault value is `SSD` . For more information, see [Storage type options](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) in the *FSx for Windows File Server User Guide* , [Multiple storage options](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) in the *FSx for Lustre User Guide* , and [Working with Intelligent-Tiering](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance-intelligent-tiering) in the *Amazon FSx for OpenZFS User Guide* .", "title": "StorageType", "type": "string" }, @@ -100910,7 +100858,7 @@ "type": "boolean" }, "RecordSizeKiB": { - "markdownDescription": "Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see [ZFS Record size](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance) in the *Amazon FSx for OpenZFS User Guide* .", + "markdownDescription": "Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). For file systems using the Intelligent-Tiering storage class, valid values are 128, 256, 512, 1024, 2048, or 4096 KiB, with a default of 1024 KiB. For all other file systems, valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB, with a default of 128 KiB. We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see [ZFS Record size](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#record-size-performance) in the *Amazon FSx for OpenZFS User Guide* .", "title": "RecordSizeKiB", "type": "number" }, @@ -105282,7 +105230,7 @@ "type": "object" }, "ConnectionType": { - "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . 
These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . 
These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\nAdditionally, a `ConnectionType` for the following SaaS connectors is supported:\n\n- `FACEBOOKADS` - Designates a connection to Facebook Ads.\n- `GOOGLEADS` - Designates a connection to Google Ads.\n- `GOOGLESHEETS` - Designates a connection to Google Sheets.\n- `GOOGLEANALYTICS4` - Designates a connection to Google Analytics 4.\n- `HUBSPOT` - Designates a connection to HubSpot.\n- `INSTAGRAMADS` - Designates a connection to Instagram Ads.\n- `INTERCOM` - Designates a connection to Intercom.\n- `JIRACLOUD` - Designates a connection to Jira Cloud.\n- `MARKETO` - Designates a connection to Adobe Marketo Engage.\n- `NETSUITEERP` - Designates a connection to Oracle NetSuite.\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n- `SALESFORCEMARKETINGCLOUD` - Designates a connection to Salesforce Marketing Cloud.\n- `SALESFORCEPARDOT` - Designates a connection to Salesforce Marketing Cloud Account Engagement (MCAE).\n- `SAPODATA` - Designates a connection to SAP OData.\n- `SERVICENOW` - Designates a connection to ServiceNow.\n- `SLACK` - Designates a connection to Slack.\n- `SNAPCHATADS` - Designates a connection to Snapchat Ads.\n- `STRIPE` - Designates a connection to Stripe.\n- `ZENDESK` - Designates a connection to Zendesk.\n- `ZOHOCRM` - Designates a connection to Zoho CRM.\n\nFor more information on the connection parameters needed for a particular 
connector, see the documentation for the connector in [Adding an AWS Glue connection](https://docs.aws.amazon.com/glue/latest/dg/console-connections.html) in the AWS Glue User Guide.\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", "title": "ConnectionType", "type": "string" }, @@ -106545,7 +106493,7 @@ "type": "number" }, "WorkerType": { - "markdownDescription": "The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.\n\n- For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.\n- For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.\n- For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).\n- For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the `G.4X` worker type.\n- For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 streaming jobs.\n- For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.", + "markdownDescription": "The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.\n\n- For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. 
We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.\n- For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it offers a scalable and cost-effective way to run most jobs.\n- For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).\n- For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the `G.4X` worker type.\n- For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs.\n- For the `Z.2X` worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.",
 "title": "WorkerType",
 "type": "string"
 }
@@ -110741,7 +110689,7 @@
 },
 "SageMakerMachineLearningModelResourceData": {
 "$ref": "#/definitions/AWS::Greengrass::ResourceDefinition.SageMakerMachineLearningModelResourceData",
- "markdownDescription": "Settings for a machine learning resource saved as an SageMaker training job.",
+ "markdownDescription": "Settings for a machine learning resource saved as a SageMaker AI training job.",
 "title": "SageMakerMachineLearningModelResourceData"
 },
 "SecretsManagerSecretResourceData": {
@@ -110854,7 +110802,7 @@
 "title": "OwnerSetting"
 },
 "SageMakerJobArn": {
- "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model.",
+ "markdownDescription": "The Amazon Resource Name
(ARN) of the Amazon SageMaker AI training job that represents the source model.", "title": "SageMakerJobArn", "type": "string" } @@ -117120,7 +117068,7 @@ "additionalProperties": false, "properties": { "RepositoryName": { - "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location.", + "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location. For example, `/repository_name` .", "title": "RepositoryName", "type": "string" }, @@ -117467,7 +117415,7 @@ "additionalProperties": false, "properties": { "RepositoryName": { - "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location.", + "markdownDescription": "The name of the container repository where the output container image is stored. This name is prefixed by the repository location. For example, `/repository_name` .", "title": "RepositoryName", "type": "string" }, @@ -117644,7 +117592,7 @@ "type": "boolean" }, "TimeoutMinutes": { - "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeoutMinutes attribute is not currently active. This value is ignored.", + "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeout attribute is not currently active. This value is ignored.", "title": "TimeoutMinutes", "type": "number" } @@ -117885,7 +117833,7 @@ "type": "boolean" }, "TimeoutMinutes": { - "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeoutMinutes attribute is not currently active. This value is ignored.", + "markdownDescription": "The maximum time in minutes that tests are permitted to run.\n\n> The timeout attribute is not currently active. This value is ignored.", "title": "TimeoutMinutes", "type": "number" } @@ -119813,7 +119761,7 @@ "properties": { "HealthEventsConfig": { "$ref": "#/definitions/AWS::InternetMonitor::Monitor.HealthEventsConfig", - "markdownDescription": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores,, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. 
In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .",
+ "markdownDescription": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .",
 "title": "HealthEventsConfig"
 },
 "IncludeLinkedAccounts": {
@@ -136378,7 +136326,7 @@
 "type": "array"
 },
 "Edition": {
- "markdownDescription": "Indicates whether the index is a Enterprise Edition index or a Developer Edition index. Valid values are `DEVELOPER_EDITION` and `ENTERPRISE_EDITION` .",
+ "markdownDescription": "Indicates whether the index is an Enterprise Edition index, a Developer Edition index, or a GenAI Enterprise Edition index.",
 "title": "Edition",
 "type": "string"
 },
@@ -149491,7 +149439,7 @@
 "additionalProperties": false,
 "properties": {
 "PolicyDocument": {
- "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with an `Deidentify` action.
The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- An Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.",
+ "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifier` array and an `Operation` property with an `Audit` action. The `DataIdentifier` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifier` array and an `Operation` property with a `Deidentify` action. The `DataIdentifier` array must exactly match the `DataIdentifier` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifier` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- A Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.\n\n*Field index policy*\n\nA field index policy can include the following attribute in a JSON block:\n\n- *Fields* The array of field indexes to create.\n\nThe following is an example of an index policy document that creates two indexes, `RequestId` and `TransactionId` .\n\n`\"policyDocument\": \"{ \\\"Fields\\\": [ \\\"RequestId\\\", \\\"TransactionId\\\" ] }\"`\n\n*Transformer policy*\n\nA transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see [Processors that you can use](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-Processors) .",
 "title": "PolicyDocument",
 "type": "string"
 },
@@ -149506,12 +149454,12 @@
 "type": "string"
 },
 "Scope": {
- "markdownDescription": "Currently the only valid value for this parameter is `ALL` , which specifies that the policy applies to all log groups in the account. If you omit this parameter, the default of `ALL` is used. To scope down a subscription filter policy to a subset of log groups, use the `selectionCriteria` parameter.",
+ "markdownDescription": "Currently the only valid value for this parameter is `ALL` , which specifies that the policy applies to all log groups in the account. If you omit this parameter, the default of `ALL` is used.
To scope down a subscription filter policy to a subset of log groups, use the `SelectionCriteria` parameter.", "title": "Scope", "type": "string" }, "SelectionCriteria": { - "markdownDescription": "Use this parameter to apply a subscription filter policy to a subset of log groups in the account. Currently, the only supported filter is `LogGroupName NOT IN []` . The `selectionCriteria` string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.\n\nUsing the `selectionCriteria` parameter is useful to help prevent infinite loops. For more information, see [Log recursion prevention](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions-recursion-prevention.html) .\n\nSpecifing `selectionCriteria` is valid only when you specify `SUBSCRIPTION_FILTER_POLICY` for `policyType` .", + "markdownDescription": "Use this parameter to apply the new policy to a subset of log groups in the account.\n\nYou need to specify `SelectionCriteria` only when you specify `SUBSCRIPTION_FILTER_POLICY` , `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY` for `PolicyType` .\n\nIf `PolicyType` is `SUBSCRIPTION_FILTER_POLICY` , the only supported `SelectionCriteria` filter is `LogGroupName NOT IN []`\n\nIf `PolicyType` is `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY` , the only supported `SelectionCriteria` filter is `LogGroupNamePrefix`\n\nThe `SelectionCriteria` string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.\n\nUsing the `SelectionCriteria` parameter with `SUBSCRIPTION_FILTER_POLICY` is useful to help prevent infinite loops. For more information, see [Log recursion prevention](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions-recursion-prevention.html) .", "title": "SelectionCriteria", "type": "string" } @@ -149593,7 +149541,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags that have been assigned to this delivery.", + "markdownDescription": "An array of key-value pairs to apply to the delivery.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", "type": "array" } @@ -149661,12 +149609,12 @@ "additionalProperties": false, "properties": { "DeliveryDestinationPolicy": { - "markdownDescription": "A structure that contains information about one delivery destination policy.", + "markdownDescription": "An IAM policy that grants permissions to CloudWatch Logs to deliver logs cross-account to a specified destination in this account. For examples of this policy, see [Examples](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html#API_PutDeliveryDestinationPolicy_Examples) in the CloudWatch Logs API Reference.", "title": "DeliveryDestinationPolicy", "type": "object" }, "DestinationResourceArn": { - "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.", + "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. 
That AWS destination can be a log group in CloudWatch Logs , an Amazon S3 bucket, or a Firehose stream.", "title": "DestinationResourceArn", "type": "string" }, @@ -149679,7 +149627,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags that have been assigned to this delivery destination.", + "markdownDescription": "An array of key-value pairs to apply to the delivery destination.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", "type": "array" } @@ -149756,7 +149704,7 @@ "type": "string" }, "ResourceArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the AWS resource that is generating and sending logs. For example, `arn:aws:workmail:us-east-1:123456789012:organization/m-1234EXAMPLEabcd1234abcd1234abcd1234`", "title": "ResourceArn", "type": "string" }, @@ -149764,7 +149712,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags that have been assigned to this delivery source.", + "markdownDescription": "An array of key-value pairs to apply to the delivery source.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", "type": "array" } @@ -165448,7 +165396,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Valkey or Redis OSS engine version used by the cluster .", + "markdownDescription": "The Redis engine version used by the cluster .", "title": "EngineVersion", "type": "string" }, @@ -174822,7 +174770,7 @@ "additionalProperties": false, "properties": { "Content": { - "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", + "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. 
The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Resource control policies: 5,120 characters\n- Declarative policies: 10,000 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", "title": "Content", "type": "object" }, @@ -182473,7 +182421,7 @@ }, "SageMakerPipelineParameters": { "$ref": "#/definitions/AWS::Pipes::Pipe.PipeTargetSageMakerPipelineParameters", - "markdownDescription": "The parameters for using a SageMaker pipeline as a target.", + "markdownDescription": "The parameters for using a SageMaker AI pipeline as a target.", "title": "SageMakerPipelineParameters" }, "SqsQueueParameters": { @@ -182539,7 +182487,7 @@ "items": { "$ref": "#/definitions/AWS::Pipes::Pipe.SageMakerPipelineParameter" }, - "markdownDescription": "List of Parameter names and values for SageMaker Model Building Pipeline execution.", + "markdownDescription": "List of Parameter names and values for SageMaker AI Model Building Pipeline execution.", "title": "PipelineParameterList", "type": "array" } @@ -182635,12 +182583,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "Name of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Name of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "Value of parameter to start execution of a SageMaker Model Building Pipeline.", + "markdownDescription": "Value of parameter to start execution of a SageMaker AI Model Building Pipeline.", "title": "Value", "type": "string" } @@ -209292,28 +209240,18 @@ "additionalProperties": false, "properties": { "ContainsHeader": { - "markdownDescription": "Whether the file has a header row, or the files each have a header row.", - "title": "ContainsHeader", "type": "boolean" }, "Delimiter": { - "markdownDescription": "The delimiter between values in the file.", - "title": "Delimiter", "type": "string" }, "Format": { - "markdownDescription": "File format.", - "title": "Format", "type": "string" }, "StartFromRow": { - "markdownDescription": "A row number to start reading data from.", - "title": "StartFromRow", "type": "number" }, "TextQualifier": { - "markdownDescription": "Text qualifier.", - "title": "TextQualifier", "type": "string" } }, @@ -224491,7 +224429,7 @@ "type": "array" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "markdownDescription": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. 
By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters",
 "title": "AutoMinorVersionUpgrade",
 "type": "boolean"
 },
@@ -224633,7 +224571,7 @@
 },
 "MasterUserSecret": {
 "$ref": "#/definitions/AWS::RDS::DBCluster.MasterUserSecret",
- "markdownDescription": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*",
+ "markdownDescription": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\n> When you restore a DB cluster from a snapshot, Amazon RDS generates a new secret instead of reusing the secret specified in the `SecretArn` property. This ensures that the restored DB cluster is securely managed with a dedicated secret. To maintain consistent integration with your application, you might need to update resource configurations to reference the newly created secret. \n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*",
 "title": "MasterUserSecret"
 },
 "MasterUsername": {
@@ -224642,12 +224580,12 @@
 "type": "string"
 },
 "MonitoringInterval": {
- "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, also set `MonitoringInterval` to a value other than `0` .\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`",
+ "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, also set `MonitoringInterval` to a value other than `0` .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`",
 "title": "MonitoringInterval",
 "type": "number"
 },
 "MonitoringRoleArn": {
- "markdownDescription": "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is `arn:aws:iam:123456789012:role/emaccess` . For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , supply a `MonitoringRoleArn` value.\n\nValid for Cluster Type: Multi-AZ DB clusters only",
+ "markdownDescription": "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is `arn:aws:iam::123456789012:role/emaccess` .
For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , supply a `MonitoringRoleArn` value.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "MonitoringRoleArn", "type": "string" }, @@ -224657,17 +224595,17 @@ "type": "string" }, "PerformanceInsightsEnabled": { - "markdownDescription": "Specifies whether to turn on Performance Insights for the DB cluster.\n\nFor more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "markdownDescription": "Specifies whether to turn on Performance Insights for the DB cluster.\n\nFor more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "PerformanceInsightsEnabled", "type": "boolean" }, "PerformanceInsightsKmsKeyId": { - "markdownDescription": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you don't specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account . Your AWS account has a different default KMS key for each AWS Region .\n\nValid for Cluster Type: Multi-AZ DB clusters only", + "markdownDescription": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you don't specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account . Your AWS account has a different default KMS key for each AWS Region .\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "PerformanceInsightsKmsKeyId", "type": "string" }, "PerformanceInsightsRetentionPeriod": { - "markdownDescription": "The number of days to retain Performance Insights data.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS issues an error.", + "markdownDescription": "The number of days to retain Performance Insights data.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. 
Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS issues an error.", "title": "PerformanceInsightsRetentionPeriod", "type": "number" }, @@ -229969,7 +229907,7 @@ "type": "array" }, "InvokerRoleName": { - "markdownDescription": "Existing AWS IAM role name in the primary AWS account that will be assumed by AWS Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment.\n\n> - You must have `iam:passRole` permission for this role while creating or updating the application.\n> - Currently, `invokerRoleName` accepts only `[A-Za-z0-9_+=,.@-]` characters.", + "markdownDescription": "Existing AWS IAM role name in the primary AWS account that will be assumed by the AWS Resilience Hub Service Principal to obtain read-only access to your application resources while running an assessment.\n\nIf your IAM role includes a path, you must include the path in the `invokerRoleName` parameter. For example, if your IAM role's ARN is `arn:aws:iam:123456789012:role/my-path/role-name` , you should pass `my-path/role-name` .\n\n> - You must have `iam:passRole` permission for this role while creating or updating the application.\n> - Currently, `invokerRoleName` accepts only `[A-Za-z0-9_+=,.@-]` characters.", "title": "InvokerRoleName", "type": "string" }, @@ -235453,7 +235391,7 @@ "title": "BucketEncryption" }, "BucketName": { - "markdownDescription": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. The bucket name must contain only lowercase letters, numbers, periods (.), and dashes (-) and must follow [Amazon S3 bucket restrictions and limitations](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) . For more information, see [Rules for naming Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", + "markdownDescription": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. The bucket name must contain only lowercase letters, numbers, periods (.), and dashes (-) and must follow [Amazon S3 bucket restrictions and limitations](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) . For more information, see [Rules for naming Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "title": "BucketName", "type": "string" }, @@ -238003,17 +237941,17 @@ "additionalProperties": false, "properties": { "BucketName": { - "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone.
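Stepping back to the `PerformanceInsightsRetentionPeriod` values listed above, the accepted numbers are `7`, `731`, or a multiple of 31. A hedged CloudFormation YAML property excerpt, not a complete resource:

```yaml
# Excerpt of AWS::RDS::DBCluster properties (illustrative only)
PerformanceInsightsEnabled: true
PerformanceInsightsRetentionPeriod: 93   # 3 months * 31; a value such as 94 is rejected
# PerformanceInsightsKmsKeyId omitted, so the Region's default KMS key is used
```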
The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", + "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Zone (Availability Zone or Local Zone). The bucket name must also follow the format `*bucket_base_name* -- *zone_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "title": "BucketName", "type": "string" }, "DataRedundancy": { - "markdownDescription": "The number of Availability Zone that's used for redundancy for the bucket.", + "markdownDescription": "The number of Zones (Availability Zones or Local Zones) that's used for redundancy for the bucket.", "title": "DataRedundancy", "type": "string" }, "LocationName": { - "markdownDescription": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the AZ ID of the Availability Zone where the bucket will be created. An example AZ ID value is `usw2-az1` .", + "markdownDescription": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the Zone ID of the Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An example AZ ID value is `usw2-az1` .", "title": "LocationName", "type": "string" } @@ -240361,7 +240299,7 @@ "type": "object" }, "FilterPolicyScope": { - "markdownDescription": "This attribute lets you choose the filtering scope by using one of the following string value types:\n\n- `MessageAttributes` (default) - The filter is applied on the message attributes.\n- `MessageBody` - The filter is applied on the message body.", + "markdownDescription": "This attribute lets you choose the filtering scope by using one of the following string value types:\n\n- `MessageAttributes` (default) - The filter is applied on the message attributes.\n- `MessageBody` - The filter is applied on the message body.\n\n> `Null` is not a valid value for `FilterPolicyScope` .
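The directory bucket naming rule above is easiest to see in a sketch; the base name and AZ ID below are hypothetical, and `SingleAvailabilityZone` is assumed as the one-zone `DataRedundancy` value:

```yaml
Resources:
  ExpressBucket:
    Type: AWS::S3Express::DirectoryBucket
    Properties:
      BucketName: my-app-data--usw2-az1--x-s3   # bucket_base_name--zone_id--x-s3
      LocationName: usw2-az1                    # the Zone ID named in the bucket
      DataRedundancy: SingleAvailabilityZone
```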
To delete a filter policy, delete the `FilterPolicy` property but keep the `FilterPolicyScope` property as is.", "title": "FilterPolicyScope", "type": "string" }, @@ -244252,7 +244190,7 @@ "type": "string" }, "SageMakerImageArn": { - "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.", + "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.", "title": "SageMakerImageArn", "type": "string" }, @@ -244316,7 +244254,7 @@ }, "KernelGatewayImageConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig", - "markdownDescription": "The configuration for the file system and kernels in the SageMaker image.", + "markdownDescription": "The configuration for the file system and kernels in the SageMaker AI image.", "title": "KernelGatewayImageConfig" }, "Tags": { @@ -244452,7 +244390,7 @@ "properties": { "FileSystemConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.FileSystemConfig", - "markdownDescription": "The Amazon Elastic File System storage configuration for a SageMaker image.", + "markdownDescription": "The Amazon Elastic File System storage configuration for a SageMaker AI image.", "title": "FileSystemConfig" }, "KernelSpecs": { @@ -244983,7 +244921,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -245363,7 +245301,7 @@ "additionalProperties": false, "properties": { "AppNetworkAccessType": { - "markdownDescription": "Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`", + "markdownDescription": "Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly` .\n\n- `PublicInternetOnly` - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI , which allows direct internet access\n- `VpcOnly` - All Studio traffic is through the specified VPC and subnets\n\n*Valid Values* : `PublicInternetOnly | VpcOnly`", "title": "AppNetworkAccessType", "type": "string" }, @@ -245666,7 +245604,7 @@ "type": "string" }, "FileSystemPath": { - "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below.", + "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker AI Studio.
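As a concrete reading of the `FilterPolicyScope` notes above, a hedged subscription sketch (topic, queue, and policy values are invented) that filters on the message body rather than the attributes:

```yaml
Resources:
  OrdersSubscription:
    Type: AWS::SNS::Subscription
    Properties:
      TopicArn: !Ref OrdersTopic          # assumes an AWS::SNS::Topic defined elsewhere
      Protocol: sqs
      Endpoint: !GetAtt OrdersQueue.Arn   # assumes an AWS::SQS::Queue defined elsewhere
      FilterPolicyScope: MessageBody      # apply the filter to the JSON message body
      FilterPolicy:
        status:
          - shipped                       # deliver only bodies containing "status": "shipped"
```

Per the note above, later removing `FilterPolicy` while leaving `FilterPolicyScope` in place is the way to delete the policy cleanly.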
Permitted users can access only this directory and below.", "title": "FileSystemPath", "type": "string" } @@ -245729,13 +245667,13 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.", "title": "CustomImages", "type": "array" }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -245748,7 +245686,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a RSession app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as an RSession app.", "title": "CustomImages", "type": "array" }, @@ -245819,7 +245757,7 @@ "type": "string" }, "SageMakerImageArn": { - "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.", + "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.", "title": "SageMakerImageArn", "type": "string" }, @@ -245864,7 +245802,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Domain.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, @@ -245912,13 +245850,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio.
Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, "SharingSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.SharingSettings", - "markdownDescription": "Specifies options for sharing Amazon SageMaker Studio notebooks.", + "markdownDescription": "Specifies options for sharing Amazon SageMaker AI Studio notebooks.", "title": "SharingSettings" }, "SpaceStorageSettings": { @@ -247538,7 +247476,7 @@ "title": "Container" }, "ModelName": { - "markdownDescription": "The name of an existing SageMaker model object in your account that you want to deploy with the inference component.", + "markdownDescription": "The name of an existing SageMaker AI model object in your account that you want to deploy with the inference component.", "title": "ModelName", "type": "string" }, @@ -247713,7 +247651,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of all content type headers that Amazon SageMaker will treat as CSV and capture accordingly.", + "markdownDescription": "The list of all content type headers that Amazon SageMaker AI will treat as CSV and capture accordingly.", "title": "CsvContentTypes", "type": "array" }, @@ -247721,7 +247659,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of all content type headers that SageMaker will treat as JSON and capture accordingly.", + "markdownDescription": "The list of all content type headers that SageMaker AI will treat as JSON and capture accordingly.", "title": "JsonContentTypes", "type": "array" } @@ -248642,7 +248580,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -249202,7 +249140,7 @@ "type": "string" }, "ModelId": { - "markdownDescription": "The SageMaker Model ARN or non- SageMaker Model ID.", + "markdownDescription": "The SageMaker AI Model ARN or non- SageMaker AI Model ID.", "title": "ModelId", "type": "string" }, @@ -249388,7 +249326,7 @@ "items": { "type": "string" }, - "markdownDescription": "SageMaker inference image URI.", + "markdownDescription": "SageMaker AI inference image URI.", "title": "ContainerImage", "type": "array" } @@ -249427,7 +249365,7 @@ "type": "array" }, "TrainingArn": { - 
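Pulling the `AppNetworkAccessType` and `SecurityGroups` guidance above into one place, a hedged domain sketch (all IDs and the execution role are placeholders); note that SageMaker AI itself consumes one security-group slot for NFS traffic, so the usable maximum is one lower:

```yaml
Resources:
  StudioDomain:
    Type: AWS::SageMaker::Domain
    Properties:
      DomainName: analytics-domain            # hypothetical name
      AuthMode: IAM
      AppNetworkAccessType: VpcOnly           # all Studio traffic stays inside the VPC
      VpcId: vpc-0abc1234def567890
      SubnetIds:
        - subnet-0abc1234def567890
      DefaultUserSettings:
        ExecutionRole: arn:aws:iam::123456789012:role/SageMakerExecutionRole   # placeholder
        SecurityGroups:
          - sg-0abc1234def567890              # required for VpcOnly unless set per user profile
```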
"markdownDescription": "The SageMaker training job Amazon Resource Name (ARN)", + "markdownDescription": "The SageMaker AI training job Amazon Resource Name (ARN)", "title": "TrainingArn", "type": "string" }, @@ -249441,14 +249379,14 @@ }, "TrainingEnvironment": { "$ref": "#/definitions/AWS::SageMaker::ModelCard.TrainingEnvironment", - "markdownDescription": "The SageMaker training job image URI.", + "markdownDescription": "The SageMaker AI training job image URI.", "title": "TrainingEnvironment" }, "TrainingMetrics": { "items": { "$ref": "#/definitions/AWS::SageMaker::ModelCard.TrainingMetric" }, - "markdownDescription": "The SageMaker training job results.", + "markdownDescription": "The SageMaker AI training job results.", "title": "TrainingMetrics", "type": "array" }, @@ -249475,7 +249413,7 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The name of the result from the SageMaker training job.", + "markdownDescription": "The name of the result from the SageMaker AI training job.", "title": "Name", "type": "string" }, @@ -249485,7 +249423,7 @@ "type": "string" }, "Value": { - "markdownDescription": "The value of a result from the SageMaker training job.", + "markdownDescription": "The value of a result from the SageMaker AI training job.", "title": "Value", "type": "number" } @@ -249903,7 +249841,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -251485,7 +251423,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -252018,7 +251956,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::MonitoringSchedule.MonitoringInput" }, - "markdownDescription": "The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker Endpoint.", + "markdownDescription": "The array of inputs for the monitoring job. 
Currently we support monitoring an Amazon SageMaker AI Endpoint.", "title": "MonitoringInputs", "type": "array" }, @@ -252038,7 +251976,7 @@ "title": "NetworkConfig" }, "RoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.", + "markdownDescription": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.", "title": "RoleArn", "type": "string" }, @@ -252075,7 +252013,7 @@ "additionalProperties": false, "properties": { "KmsKeyId": { - "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", + "markdownDescription": "The AWS Key Management Service ( AWS KMS ) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.", "title": "KmsKeyId", "type": "string" }, @@ -252193,7 +252131,7 @@ "type": "string" }, "ScheduleExpression": { - "markdownDescription": "A cron expression that describes details about the monitoring schedule.\n\nThe supported cron expressions are:\n\n- If you want to set the job to start every hour, use the following:\n\n`Hourly: cron(0 * ? * * *)`\n- If you want to start the job daily:\n\n`cron(0 [00-23] ? * * *)`\n- If you want to run the job one time, immediately, use the following keyword:\n\n`NOW`\n\nFor example, the following are valid cron expressions:\n\n- Daily at noon UTC: `cron(0 12 ? * * *)`\n- Daily at midnight UTC: `cron(0 0 ? * * *)`\n\nTo support running every 6, 12 hours, the following are also supported:\n\n`cron(0 [00-23]/[01-24] ? * * *)`\n\nFor example, the following are valid cron expressions:\n\n- Every 12 hours, starting at 5pm UTC: `cron(0 17/12 ? * * *)`\n- Every two hours starting at midnight: `cron(0 0/2 ? * * *)`\n\n> - Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution.\n> - We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker will pick a time for running every day. \n\nYou can also specify the keyword `NOW` to run the monitoring job immediately, one time, without recurring.", + "markdownDescription": "A cron expression that describes details about the monitoring schedule.\n\nThe supported cron expressions are:\n\n- If you want to set the job to start every hour, use the following:\n\n`Hourly: cron(0 * ? * * *)`\n- If you want to start the job daily:\n\n`cron(0 [00-23] ? * * *)`\n- If you want to run the job one time, immediately, use the following keyword:\n\n`NOW`\n\nFor example, the following are valid cron expressions:\n\n- Daily at noon UTC: `cron(0 12 ? * * *)`\n- Daily at midnight UTC: `cron(0 0 ? * * *)`\n\nTo support running every 6, 12 hours, the following are also supported:\n\n`cron(0 [00-23]/[01-24] ? * * *)`\n\nFor example, the following are valid cron expressions:\n\n- Every 12 hours, starting at 5pm UTC: `cron(0 17/12 ? * * *)`\n- Every two hours starting at midnight: `cron(0 0/2 ? * * *)`\n\n> - Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution.\n> - We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker AI will pick a time for running every day. 
\n\nYou can also specify the keyword `NOW` to run the monitoring job immediately, one time, without recurring.", "title": "ScheduleExpression", "type": "string" } @@ -252301,17 +252239,17 @@ "items": { "type": "string" }, - "markdownDescription": "An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see [Associating Git Repositories with SageMaker Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", + "markdownDescription": "An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see [Associating Git Repositories with SageMaker AI Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", "title": "AdditionalCodeRepositories", "type": "array" }, "DefaultCodeRepository": { - "markdownDescription": "The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see [Associating Git Repositories with SageMaker Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", + "markdownDescription": "The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see [Associating Git Repositories with SageMaker AI Notebook Instances](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html) .", "title": "DefaultCodeRepository", "type": "string" }, "DirectInternetAccess": { - "markdownDescription": "Sets whether SageMaker provides internet access to the notebook instance. If you set this to `Disabled` this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC.\n\nFor more information, see [Notebook Instances Are Internet-Enabled by Default](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access) . 
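The cron grammar above maps directly onto `ScheduleConfig`; a hedged fragment (the schedule name is invented and the job definition is elided) showing the every-12-hours form:

```yaml
Resources:
  DriftMonitoringSchedule:
    Type: AWS::SageMaker::MonitoringSchedule
    Properties:
      MonitoringScheduleName: data-drift-schedule      # hypothetical name
      MonitoringScheduleConfig:
        ScheduleConfig:
          ScheduleExpression: "cron(0 17/12 ? * * *)"  # every 12 hours, starting at 5pm UTC
        # MonitoringJobDefinition omitted for brevity; a real schedule must define the job
```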
You can set the value of this parameter to `Disabled` only if you set a value for the `SubnetId` parameter.", + "markdownDescription": "Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to `Disabled` , this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC.\n\nFor more information, see [Notebook Instances Are Internet-Enabled by Default](https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access) . You can set the value of this parameter to `Disabled` only if you set a value for the `SubnetId` parameter.", "title": "DirectInternetAccess", "type": "string" }, @@ -252326,7 +252264,7 @@ "type": "string" }, "KmsKeyId": { - "markdownDescription": "The Amazon Resource Name (ARN) of a AWS Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see [Enabling and Disabling Keys](https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the *AWS Key Management Service Developer Guide* .", + "markdownDescription": "The Amazon Resource Name (ARN) of an AWS Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see [Enabling and Disabling Keys](https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the *AWS Key Management Service Developer Guide* .", "title": "KmsKeyId", "type": "string" }, @@ -252346,7 +252284,7 @@ "type": "string" }, "RoleArn": { - "markdownDescription": "When you send any requests to AWS resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see [SageMaker Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) .\n\n> To be able to pass this role to SageMaker, the caller of this API must have the `iam:PassRole` permission.", + "markdownDescription": "When you send any requests to AWS resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see [SageMaker AI Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) .\n\n> To be able to pass this role to SageMaker AI, the caller of this API must have the `iam:PassRole` permission.", "title": "RoleArn", "type": "string" }, @@ -253018,7 +252956,7 @@ "properties": { "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.
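A hedged notebook-instance sketch tying together the `DirectInternetAccess`, `KmsKeyId`, and `RoleArn` notes above; the subnet, security group, key, and role are placeholders, and `Disabled` is valid here only because `SubnetId` is set:

```yaml
Resources:
  PrivateNotebook:
    Type: AWS::SageMaker::NotebookInstance
    Properties:
      InstanceType: ml.t3.medium
      RoleArn: arn:aws:iam::123456789012:role/NotebookRole   # caller needs iam:PassRole for this
      DirectInternetAccess: Disabled      # requires SubnetId; outbound traffic then needs a NAT gateway
      SubnetId: subnet-0abc1234def567890
      SecurityGroupIds:
        - sg-0abc1234def567890
      KmsKeyId: arn:aws:kms:us-west-2:123456789012:key/11112222-3333-4444-5555-666677778888   # volume encryption key
```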
If you use the `LifecycleConfigArns` parameter, then this parameter is also required.", "title": "DefaultResourceSpec" } }, @@ -253031,13 +252969,13 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Space.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.", "title": "CustomImages", "type": "array" }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -253066,7 +253004,7 @@ "type": "string" }, "SageMakerImageArn": { - "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.", + "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.", "title": "SageMakerImageArn", "type": "string" }, @@ -253112,7 +253050,7 @@ "additionalProperties": false, "properties": { "AppType": { - "markdownDescription": "The type of app created within the space.", + "markdownDescription": "The type of app created within the space.\n\nIf using the [UpdateSpace](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_UpdateSpace.html) API, you can't change the app type of your space by specifying a different value for this field.", "title": "AppType", "type": "string" }, @@ -253125,7 +253063,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::Space.CustomFileSystem" }, - "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.", + "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.", "title": "CustomFileSystems", "type": "array" }, @@ -253410,7 +253348,7 @@ "type": "string" }, "FileSystemPath": { - "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below.", + "markdownDescription": "The path to the file system directory that is accessible in Amazon SageMaker AI Studio. 
Permitted users can access only this directory and below.", "title": "FileSystemPath", "type": "string" } @@ -253473,13 +253411,13 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomImage" }, - "markdownDescription": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", + "markdownDescription": "A list of custom SageMaker AI images that are configured to run as a KernelGateway app.", "title": "CustomImages", "type": "array" }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", - "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.\n\n> The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", "title": "DefaultResourceSpec" } }, @@ -253510,7 +253448,7 @@ "type": "string" }, "SageMakerImageArn": { - "markdownDescription": "The ARN of the SageMaker image that the image version belongs to.", + "markdownDescription": "The ARN of the SageMaker AI image that the image version belongs to.", "title": "SageMakerImageArn", "type": "string" }, @@ -253555,7 +253493,7 @@ "items": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomFileSystemConfig" }, - "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "CustomFileSystemConfigs", "type": "array" }, @@ -253598,13 +253536,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. 
SageMaker doesn't apply these settings to shared spaces.", + "markdownDescription": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.\n\nOptional when the `CreateDomain.AppNetworkAccessType` parameter is set to `PublicInternetOnly` .\n\nRequired when the `CreateDomain.AppNetworkAccessType` parameter is set to `VpcOnly` , unless specified as part of the `DefaultUserSettings` for the domain.\n\nAmazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.\n\nSageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.", "title": "SecurityGroups", "type": "array" }, "SharingSettings": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.SharingSettings", - "markdownDescription": "Specifies options for sharing Amazon SageMaker Studio notebooks.", + "markdownDescription": "Specifies options for sharing Amazon SageMaker AI Studio notebooks.", "title": "SharingSettings" }, "SpaceStorageSettings": { @@ -255095,7 +255033,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when this finding record was created.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "CreatedAt", "type": "array" }, @@ -255119,7 +255057,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "FirstObservedAt", "type": "array" }, @@ -255143,7 +255081,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "LastObservedAt", "type": "array" }, @@ -255159,7 +255097,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "The timestamp of when the note was updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "The timestamp of when the note was updated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "NoteUpdatedAt", "type": "array" }, @@ -255295,7 +255233,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", + "markdownDescription": "A timestamp that indicates when the finding record was most recently updated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .\n\nArray Members: Minimum number of 1 item. Maximum number of 20 items.", "title": "UpdatedAt", "type": "array" }, @@ -255335,12 +255273,12 @@ "title": "DateRange" }, "End": { - "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "End", "type": "string" }, "Start": { - "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "Start", "type": "string" } @@ -255612,7 +255550,7 @@ "additionalProperties": false, "properties": { "AutoEnableControls": { - "markdownDescription": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .", + "markdownDescription": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .\n\nWhen you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of `DISABLED` . It can take up to several days for Security Hub to process the control release and designate the control as `ENABLED` in your account. 
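The `DateFilter` shape above supports either a fixed `Start`/`End` window or a relative `DateRange`; a hedged criteria excerpt (the timestamps are arbitrary examples in one of the accepted formats):

```yaml
# Excerpt of AWS::SecurityHub::AutomationRule criteria (illustrative only)
Criteria:
  CreatedAt:
    - Start: "2024-01-01T00:00:00Z"   # fixed window
      End: "2024-06-30T23:59:59Z"
  UpdatedAt:
    - DateRange:                      # relative window: updated within the last 30 days
        Unit: DAYS
        Value: 30
```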
During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have `AutoEnableControls` set to `true` .", "title": "AutoEnableControls", "type": "boolean" }, @@ -255818,7 +255756,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "CreatedAt", "type": "array" }, @@ -255898,7 +255836,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
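The `AutoEnableControls` behavior just described looks like this in a hedged hub sketch (the consolidated-findings setting is included only as an assumed companion option):

```yaml
Resources:
  SecurityHub:
    Type: AWS::SecurityHub::Hub
    Properties:
      AutoEnableControls: true                    # new controls report DISABLED until processing completes
      ControlFindingGenerator: SECURITY_CONTROL   # assumed optional setting for consolidated control findings
```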
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "FirstObservedAt", "type": "array" }, @@ -255922,7 +255860,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "LastObservedAt", "type": "array" }, @@ -256074,7 +256012,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies when the process was launched.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that identifies when the process was launched.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ProcessLaunchedAt", "type": "array" }, @@ -256114,7 +256052,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies when the process was terminated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that identifies when the process was terminated.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ProcessTerminatedAt", "type": "array" }, @@ -256338,7 +256276,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies when the container was started.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that identifies when the container was started.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ResourceContainerLaunchedAt", "type": "array" }, @@ -256434,7 +256372,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that identifies the last observation of a threat intelligence indicator.", + "markdownDescription": "A timestamp that identifies the last observation of a threat intelligence indicator.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "ThreatIntelIndicatorLastObservedAt", "type": "array" }, @@ -256490,7 +256428,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" }, - "markdownDescription": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "UpdatedAt", "type": "array" }, @@ -256568,12 +256506,12 @@ "title": "DateRange" }, "End": { - "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. 
For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "End", "type": "string" }, "Start": { - "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats that you can send to Security Hub:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )\n\nIf a finding provider sends a finding to Security Hub that contains a timestamp in nanoseconds, we round it to milliseconds. For example, we round `2024-10-31T23:00:00.123456789Z` to `2024-10-31T23:00:00.123Z` .", + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nFor more information about the validation and formatting of timestamp fields in AWS Security Hub , see [Timestamps](https://docs.aws.amazon.com/securityhub/1.0/APIReference/Welcome.html#timestamps) .", "title": "Start", "type": "string" } @@ -261372,7 +261310,7 @@ "type": "string" }, "FailureRetentionPeriod": { - "markdownDescription": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.", + "markdownDescription": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.\n\nThis setting affects the range of information returned by [GetCanaryRuns](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanaryRuns.html) , as well as the range of information displayed in the Synthetics console.", "title": "FailureRetentionPeriod", "type": "number" }, @@ -261402,7 +261340,7 @@ "type": "boolean" }, "SuccessRetentionPeriod": { - "markdownDescription": "The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.", + "markdownDescription": "The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. 
The valid range is 1 to 455 days.\n\nThis setting affects the range of information returned by [GetCanaryRuns](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanaryRuns.html) , as well as the range of information displayed in the Synthetics console.", "title": "SuccessRetentionPeriod", "type": "number" }, @@ -264591,7 +264529,7 @@ "type": "string" }, "ResourceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network or service.", + "markdownDescription": "The ID or ARN of the service network or service.", "title": "ResourceIdentifier", "type": "string" }, @@ -264671,7 +264609,7 @@ "type": "object" }, "ResourceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created.", + "markdownDescription": "The ID or ARN of the service network or service for which the policy is created.", "title": "ResourceIdentifier", "type": "string" } @@ -264759,7 +264697,7 @@ "type": "string" }, "ServiceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service.", + "markdownDescription": "The ID or ARN of the service.", "title": "ServiceIdentifier", "type": "string" }, @@ -264979,7 +264917,7 @@ "title": "Action" }, "ListenerIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the listener.", + "markdownDescription": "The ID or ARN of the listener.", "title": "ListenerIdentifier", "type": "string" }, @@ -264999,7 +264937,7 @@ "type": "number" }, "ServiceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service.", + "markdownDescription": "The ID or ARN of the service.", "title": "ServiceIdentifier", "type": "string" }, @@ -265449,12 +265387,12 @@ "title": "DnsEntry" }, "ServiceIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service.", + "markdownDescription": "The ID or ARN of the service.", "title": "ServiceIdentifier", "type": "string" }, "ServiceNetworkIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts.", + "markdownDescription": "The ID or ARN of the service network. You must use an ARN if the resources are in different accounts.", "title": "ServiceNetworkIdentifier", "type": "string" }, @@ -265549,7 +265487,7 @@ "type": "array" }, "ServiceNetworkIdentifier": { - "markdownDescription": "The ID or Amazon Resource Name (ARN) of the service network. You must use the ARN when the resources specified in the operation are in different accounts.", + "markdownDescription": "The ID or ARN of the service network. You must use an ARN if the resources are in different accounts.", "title": "ServiceNetworkIdentifier", "type": "string" }, @@ -269012,7 +268950,7 @@ "title": "ForwardedIPConfig" }, "Limit": { - "markdownDescription": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. 
If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", + "markdownDescription": "The limit on requests during the specified evaluation window for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "title": "Limit", "type": "number" }, @@ -269286,7 +269224,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.Label" }, - "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", + "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\n> Any rule that isn't a rule group reference statement or managed rule group statement can add labels to matching web requests. \n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", "title": "RuleLabels", "type": "array" }, @@ -270514,7 +270452,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RuleActionOverride" }, - "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. 
You specify one override for each rule whose action you want to change.\n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", + "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\n> Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, AWS WAF doesn't return an error and doesn't apply the override setting. \n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", "title": "RuleActionOverrides", "type": "array" }, @@ -270614,7 +270552,7 @@ "title": "ForwardedIPConfig" }, "Limit": { - "markdownDescription": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", + "markdownDescription": "The limit on requests during the specified evaluation window for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "title": "Limit", "type": "number" }, @@ -271120,7 +271058,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::WebACL.Label" }, - "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", + "markdownDescription": "Labels to apply to web requests that match the rule match statement. AWS WAF applies fully qualified labels to matching web requests. 
A fully qualified label is the concatenation of a label namespace and a rule label. The rule's rule group or web ACL defines the label namespace.\n\n> Any rule that isn't a rule group reference statement or managed rule group statement can add labels to matching web requests. \n\nRules that run after this rule in the web ACL can match against these labels using a `LabelMatchStatement` .\n\nFor each label, provide a case-sensitive string containing optional namespaces and a label name, according to the following guidelines:\n\n- Separate each component of the label with a colon.\n- Each namespace or name can have up to 128 characters.\n- You can specify up to 5 namespaces in a label.\n- Don't use the following reserved words in your label specification: `aws` , `waf` , `managed` , `rulegroup` , `webacl` , `regexpatternset` , or `ipset` .\n\nFor example, `myLabelName` or `nameSpace1:nameSpace2:myLabelName` .", "title": "RuleLabels", "type": "array" }, @@ -271214,7 +271152,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RuleActionOverride" }, - "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", + "markdownDescription": "Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.\n\n> Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, AWS WAF doesn't return an error and doesn't apply the override setting. \n\nYou can use overrides for testing, for example you can override all of rule actions to `Count` and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.", "title": "RuleActionOverrides", "type": "array" }
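The Security Hub hunks in this section replace the long per-field timestamp-format lists with a single pointer to the shared Timestamps reference; the accepted formats themselves are unchanged. As a minimal sketch of where one of the affected DateFilter fields is used, the template fragment below filters an insight by UpdatedAt. The logical ID, insight name, and timestamp values are illustrative assumptions, not part of this patch.

Resources:
  RecentlyUpdatedFindings:                         # hypothetical logical ID
    Type: AWS::SecurityHub::Insight
    Properties:
      Name: findings-updated-in-january-2024       # hypothetical insight name
      GroupByAttribute: ResourceId
      Filters:
        UpdatedAt:                                 # a list of DateFilter, as documented above
          - Start: "2024-01-01T00:00:00Z"          # YYYY-MM-DDTHH:MM:SSZ form
            End: "2024-01-31T23:00:00.123456789Z"  # nanosecond precision; Security Hub rounds to milliseconds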
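The Synthetics change documents that FailureRetentionPeriod and SuccessRetentionPeriod bound what GetCanaryRuns and the console can return, not just how long artifacts are stored. A sketch of a canary that sets both explicitly; the bucket, role ARN, runtime version, and inline script are placeholders to verify against your account and the currently supported Synthetics runtimes.

  HeartbeatCanary:
    Type: AWS::Synthetics::Canary
    Properties:
      Name: heartbeat-canary                                       # hypothetical canary name
      ArtifactS3Location: s3://my-canary-artifacts/heartbeat       # placeholder bucket/prefix
      ExecutionRoleArn: arn:aws:iam::123456789012:role/CanaryRole  # placeholder role
      RuntimeVersion: syn-nodejs-puppeteer-9.0                     # assumed runtime; check the supported list
      Schedule:
        Expression: rate(5 minutes)
      Code:
        Handler: index.handler            # must match the script's exported handler
        Script: |
          // trivial placeholder script body
          exports.handler = async () => "ok";
      StartCanaryAfterCreation: true
      FailureRetentionPeriod: 90          # days; valid range 1 to 455, default 31
      SuccessRetentionPeriod: 31          # also limits GetCanaryRuns results and console history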
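The WAFv2 rate-based wording now reads "during the specified evaluation window" rather than "per 5-minute period", reflecting that the window is no longer fixed. A sketch of a rate-based rule under that reading; EvaluationWindowSec and its 300-second default are assumptions for illustration, so confirm them against the current WAFv2 schema. The rule also attaches a label, which the updated RuleLabels note permits because a rate-based statement is not a rule group reference.

      Rules:
        - Name: rate-limit-per-ip           # hypothetical rule name
          Priority: 0
          Statement:
            RateBasedStatement:
              AggregateKeyType: IP
              Limit: 2000                   # per aggregation instance (here, per source IP) per window
              EvaluationWindowSec: 300      # assumed property name for the evaluation window
          Action:
            Block: {}
          RuleLabels:
            - Name: nameSpace1:rateLimited  # matchable later via a LabelMatchStatement
          VisibilityConfig:
            SampledRequestsEnabled: true
            CloudWatchMetricsEnabled: true
            MetricName: rate-limit-per-ip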
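The new caution on RuleActionOverrides (a rule name that matches nothing in the group is silently ignored, with no error returned) matters most in the count-then-block testing workflow the description mentions. A sketch of that workflow; SizeRestrictions_BODY is believed to be a real rule in AWSManagedRulesCommonRuleSet, but any override name should be checked against the rule group's published rule list.

        - Name: common-rule-set                 # hypothetical rule name
          Priority: 1
          OverrideAction:
            None: {}                            # required for rule group reference rules
          Statement:
            ManagedRuleGroupStatement:
              VendorName: AWS
              Name: AWSManagedRulesCommonRuleSet
              RuleActionOverrides:
                - Name: SizeRestrictions_BODY   # must exactly match a rule in the group; typos are silently dropped
                  ActionToUse:
                    Count: {}                   # count instead of block while measuring impact
          VisibilityConfig:
            SampledRequestsEnabled: true
            CloudWatchMetricsEnabled: true
            MetricName: common-rule-set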