From 39fb9e957a3de062f21ca1b2c14ef69894767538 Mon Sep 17 00:00:00 2001 From: miguelhar <98769216+miguelhar@users.noreply.github.com> Date: Tue, 12 Sep 2023 11:59:01 -0400 Subject: [PATCH] CDK to TF migration supporting changes (#125) * PLAT-7142: CDK to TF migration support. --- .gitignore | 1 + README.md | 120 +++++++++++++++++- bin/state-migration/README.md | 2 +- examples/deploy/README.md | 116 ++++++----------- examples/deploy/set-mod-version.sh | 7 +- examples/deploy/terraform/cluster.tfvars | 3 +- examples/deploy/terraform/cluster/README.md | 3 +- examples/deploy/terraform/cluster/main.tf | 10 +- .../deploy/terraform/cluster/variables.tf | 86 ++++++++++++- examples/deploy/terraform/infra/README.md | 4 +- examples/deploy/terraform/infra/main.tf | 2 + examples/deploy/terraform/infra/variables.tf | 100 ++++++++++++++- examples/deploy/terraform/nodes/main.tf | 4 +- examples/deploy/terraform/nodes/variables.tf | 1 + examples/deploy/tf.sh | 103 +++++++++++---- modules/eks/README.md | 2 +- modules/eks/variables.tf | 24 ++-- modules/infra/README.md | 2 +- modules/infra/iam.tf | 3 +- modules/infra/submodules/network/main.tf | 1 - modules/infra/variables.tf | 6 +- tests/plan/terraform/README.md | 2 +- tests/plan/terraform/variables.tf | 2 +- 23 files changed, 457 insertions(+), 147 deletions(-) diff --git a/.gitignore b/.gitignore index 853c2c14..83644427 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ *nodes.outputs* **migrated.txt **/deploy-test/* +**terraform.plan **.terraform.lock.hcl* **.terraform.lock.hcl diff --git a/README.md b/README.md index c244e3c1..4f35b8cd 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,10 @@ * Purpose: Ensures the integrity and functionality of the module. * Contents: Contains automation-driven tests intended for validation and continuous integration (CI) checks. +* bin/state-migration/: + * Purpose: Contains automation to perform terraform state migration, from a monolithic module to a multi-module structure. + * Contents: Script and documentation to perform terraform state migration. + Always refer to each section's respective README or documentation for detailed information and usage guidelines. ## Prerequisites @@ -36,7 +40,7 @@ Update the following values: MOD_VERSION='v3.0.0' DEPLOY_DIR='domino-deploy' ``` -:warning: Ensure the DEPLOY_DIR does not exist or is currently empty. +:warning: Ensure the `DEPLOY_DIR` does not exist or is currently empty. ```bash mkdir -p "$DEPLOY_DIR" @@ -52,6 +56,7 @@ with Terraform immediately by creating Terraform configuration files. ``` If successful, you should get a structure similar to this: + ```bash domino-deploy ├── README.md @@ -79,7 +84,7 @@ domino-deploy └── tf.sh ``` -**Note**: It's recommended to go through the README.md within the DEPLOY_DIR for further details. +**Note**: It's recommended to go through the README.md within the `DEPLOY_DIR` for further details. ### 2. Update modules version You can update the modules version using a script or manually. @@ -106,14 +111,119 @@ For example if `MOD_VERSION=v3.0.0` * **nodes/main.tf** : Update `module.nodes.source` from `"./../../../../modules/nodes"` to `github.com/dominodatalab/terraform-aws-eks.git//modules/nodes?ref=v3.0.0` -### 3. Review and Configure `infra.tfvars` -Your initial setup is guided by the Terraform variables in `domino-deploy/terraform/infra.tfvars`. Ensure you review and modify this file as needed. +### 3. 
Review and Configure `tfvars`

Consult the available variables within each module's `variables.tf`:

* `domino-deploy/terraform/infra/variables.tf`
  * `deploy_id`
  * `region`
  * `tags`
  * `network`
  * `default_node_groups`
  * `additional_node_groups`
  * `storage`
  * `kms`
  * `eks`
  * `ssh_pvt_key_path`
  * `route53_hosted_zone_name`
  * `bastion`

* `domino-deploy/terraform/cluster/variables.tf`
  * `eks`
  * `kms_info`: :warning: This variable is only intended for migrated infrastructure; it is not recommended to set it.

* `domino-deploy/terraform/nodes/variables.tf`
  * `default_node_groups`
  * `additional_node_groups`

Configure the Terraform variables in:

* `domino-deploy/terraform/infra.tfvars`
* `domino-deploy/terraform/cluster.tfvars`
* `domino-deploy/terraform/nodes.tfvars`

**NOTE**: The `eks` configuration is required in both the `infra` and `cluster` modules because the Kubernetes version is used for installing the `kubectl` binary on the bastion host. Similarly, `default_node_groups` and `additional_node_groups` must be defined in both the `infra` and `nodes` modules, as the availability zones for the nodes are needed when setting up the network infrastructure.
The `eks` module will source its information from the `infra` outputs if it is not configured in `cluster.tfvars`, as will the `nodes` module if the variables are not configured in `nodes.tfvars`. We recommend setting the `eks` and `nodes` variables from the beginning, as future Kubernetes upgrades will be driven from `cluster.tfvars` and `nodes.tfvars`.

### 4. Create SSH Key pair
The deployment requires an SSH key. Update the `ssh_pvt_key_path` variable in `domino-deploy/terraform/infra.tfvars` with the full path of your key (we recommend you place your key under the `domino-deploy/terraform` directory).

If you don't have an SSH key, you can create one using:

```bash
ssh-keygen -q -P '' -t rsa -b 4096 -m PEM -f domino.pem && chmod 600 domino.pem
```

### 5. Deploy
#### 1. Set `AWS` credentials and verify.
```bash
aws sts get-caller-identity
```

#### 2. Change into `domino-deploy` (or whatever your `DEPLOY_DIR` is)

```bash
cd domino-deploy
```

#### 3. Plan and Apply.
:warning: It is recommended to become familiar with the `tf.sh` [usage](./examples/deploy/README.md#usage).

At this point all requirements should be in place to provision the infrastructure.

For each of the modules, run `init`, `plan`, inspect the plan, then `apply`, in the following order:

1. `infra`
2. `cluster`
3. `nodes`

Note: You can use `all` instead, but it is recommended that `plan` and `apply` be done one component at a time so that each plan can be carefully examined.

1. Init all

```bash
./tf.sh all init
```

2. `infra` plan

```bash
./tf.sh infra plan
```

3. :exclamation: Carefully inspect the actions detailed in the `infra` plan for correctness before proceeding.

4. `infra` apply

```bash
./tf.sh infra apply
```

5. `cluster` plan

```bash
./tf.sh cluster plan
```

6. :exclamation: Carefully inspect the actions detailed in the `cluster` plan for correctness before proceeding.

7. `cluster` apply

```bash
./tf.sh cluster apply
```

8. `nodes` plan

```bash
./tf.sh nodes plan
```

9. :exclamation: Carefully inspect the actions detailed in the `nodes` plan for correctness before proceeding.

10. `nodes` apply

```bash
./tf.sh nodes apply
```

### At this point your deployment has been completed.
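As a quick post-deployment check, you can point `kubectl` at the new cluster and confirm the nodes registered. This is a minimal sketch; it assumes the EKS cluster name matches the `deploy_id` set in `infra.tfvars` and that your AWS credentials and region are still configured:

```bash
# Add a kubeconfig entry for the new cluster.
# Assumption: the cluster name matches the deploy_id from infra.tfvars.
aws eks update-kubeconfig --region <your-region> --name <your-deploy-id>

# Nodes from the default node groups should eventually report Ready.
kubectl get nodes
```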
diff --git a/bin/state-migration/README.md b/bin/state-migration/README.md
index 7d84c742..415d0d46 100644
--- a/bin/state-migration/README.md
+++ b/bin/state-migration/README.md
@@ -46,7 +46,7 @@ Ensure all prerequisites are met.
 * **LEGACY_STATE**: Path to the Terraform state file for the deployment you're migrating. This file is typically named `terraform.tfstate`.
 4. Run the script:
-  * Change onto `DEPLOY_DIR` and run the script
+  * Change into `DEPLOY_DIR` and run the script
 ```bash
 cd $DEPLOY_DIR
 ./migrate-states.sh
diff --git a/examples/deploy/README.md b/examples/deploy/README.md
index 77b6fe25..14b9fef3 100644
--- a/examples/deploy/README.md
+++ b/examples/deploy/README.md
@@ -1,7 +1,7 @@
 # Terraform Multi-Module Management
 ## Overview
-The `tf.sh` script provides a convenient method to manage multiple Terraform configurations for various components of a system. The primary modules managed by this script include `infra`, `cluster`, and `nodes`. These components might represent different layers of an infrastructure deployment.
+The `tf.sh` script provides a convenient method to manage multiple Terraform configurations for various components of a system. The primary modules managed by this script include `infra`, `cluster`, and `nodes`. These components might represent different layers of an infrastructure deployment. Similarly, the `set-mod-version.sh` script helps to set the source module version on all three modules (`infra`, `cluster`, and `nodes`); see the [README](../../README.md#Using_script).
 ## Pre-requisites
 * Ensure that `terraform` is installed and accessible in your path.
@@ -10,7 +10,7 @@ The `tf.sh` script provides a convenient method to manage multiple Terraform con
 ## Directory Structure
 The script expects the following directory structure:
 ```
-examples/deploy
+deploy
 ├── README.md
 ├── meta.sh
 ├── set-mod-version.sh
@@ -37,11 +37,11 @@ examples/deploy
 ```
 * Each subdirectory under `terraform` (e.g., `infra`, `cluster`, `nodes`) should contain its respective Terraform configurations.
-* Each component is expected to have a corresponding `.tfvars` file at the root directory. For instance, for the `infra` component, there should be an `infra.tfvars` in the root directory.
+* Each component is expected to have a corresponding `.tfvars` file in the `terraform` directory. For instance, for the `infra` component, there should be a `terraform/infra.tfvars` file.
+* Each component's state and output (when the `output` command is invoked) is saved in the `terraform` directory:

```bash
-└── examples/deploy/terraform
+└─ deploy/terraform
   ├── cluster.outputs
   ├── cluster.tfstate
   ├── infra.outputs

## Variables structure
-The modular design of this Terraform setup allows for a streamlined flow of variable values across different stages of your infrastructure, from the foundational `infra` module up to the more specialized `nodes` module.
-
-
-### Inter-module Variable Propagation
-
-1. **From `infra` to `cluster`**:
-   * The `infra` module is where most foundational variables are defined. Once provisioned, these variable values can be consumed by the `cluster` module using Terraform's [remote state data source](https://www.terraform.io/docs/language/state/remote-state-data.html).
-
-2. **From both `infra` and `cluster` to `nodes`**:
-   * The `nodes` module consumes variable values from both the `infra` and `cluster` modules. 
This is achieved by accessing their respective remote states. - -### infra.tfvars -You can find examples in the examples/tfvars directory. This file accommodates all variables defined in modules/infra. - -### cluster.tfvars -This file provides the capability to override the k8s_version variable, aiding in Kubernetes upgrades. - -### nodes.tfvars -This file allows you to override two variables: default_node_groups and additional_node_groups, making it easier to update node groups. - -### Overriding Variables for Kubernetes Upgrades - -The ability to upgrade Kubernetes without affecting other infrastructure components is crucial for maintainability: - -```bash -. -├── README.md -├── cluster.tfvars -├── infra.tfvars -├── nodes.tfvars -├── terraform -│   ├── cluster -│   ├── infra -│   └── nodes -└── tf.sh -``` - -* The `cluster` module accepts a variable named `k8s_version` via the `cluster.tfvars`. -* While the initial value of `k8s_version` comes from the `infra` module, you have the flexibility to overwrite it in the `cluster` module via the the `cluster.tfvars`. This facilitates Kubernetes version upgrades without making changes to the underlying infrastructure set up by the `infra` module. - -### Enhancing Flexibility in Node Configurations - -For node configurations and upgrades, the design follows a similar pattern: - -* The `nodes` module allows you to override the default node configurations (`default_node_groups`) and any additional node configurations (`additional_node_groups`). -* This is done using the `merge` function, ensuring you can easily add or modify node groups as required. -* In scenarios where only the node pool requires an update, you can simply modify the `nodes.tfvars` and run `./tf.sh nodes apply`. This avoids the need to re-apply the `infra` or `cluster` modules, streamlining node management. - -With this structure, the infrastructure maintains a clear hierarchy of variable propagation, ensuring ease of use, flexibility, and minimal disruptions during updates and upgrades. - +See [README](../../README.md#3-review-and-configure-tfvars) ## Usage @@ -110,16 +61,20 @@ To use the script, invoke it with the desired command and component: ./tf.sh ``` -* **component**: The component you wish to apply the command on. Supported components are `infra`, `cluster`, `nodes`, and `all`. Using `all` will apply the command on all components. +* **component**: The component parameter refers to the specific section of your architecture that you wish to target with a command. Supported components include `infra`, `cluster`, `nodes`, and `all`. Selecting all will execute the command across `infra`, `cluster` and `nodes`. + The script uses the component parameter to identify corresponding Terraform directories and to name both the Terraform variables file (`terraform/${component}.tfvars`) and the Terraform state file (`terraform/${component}.tfstate`). If you create a custom folder named `mydir` that includes your Terraform configuration, setup a terraform variables file(`terraform/mydir.tfstate`), and state file(`terraform/mydir.tfstate`) if existing, then you can utilize the tf.sh script to execute Terraform commands. For example, running `./tf.sh mydir plan`. -* **command**: Supported commands include: - * init: Initializes the Terraform configurations. - * plan: Shows the execution plan of Terraform. - * apply: Applies the Terraform configurations. - * destroy: Destroys the Terraform resources. - * output: Shows the output values of your configurations. 
- * refresh: Refreshes the Terraform state file.

* **command**: Supported commands include:
  * `init`: Initializes the Terraform configurations.
  * `plan`: Shows the execution plan of Terraform.
  * `apply`: Applies the Terraform configurations.
  * `destroy`: Destroys the Terraform resources.
  * `output`: Shows the output values of your configurations.
  * `refresh`: Refreshes the Terraform state file.
  * `plan_out`: Generates a plan and writes it to `terraform/${component}-terraform.plan`.
  * `apply_plan`: Applies the plan located at `terraform/${component}-terraform.plan`.

## Examples

@@ -141,6 +96,18 @@ To use the script, invoke it with the desired command and component:
 ./tf.sh all destroy
 ```

* To perform a plan and write it to a file (the plan file will be stored at `terraform/${component}-terraform.plan`):

```bash
./tf.sh cluster plan_out
```

* To apply a previously generated plan stored at `terraform/${component}-terraform.plan` (for this example, `terraform/cluster-terraform.plan`):

```bash
./tf.sh cluster apply_plan
```

## Common Operations

For some frequently performed operations, follow the steps outlined below:
@@ -149,12 +116,12 @@ For some frequently performed operations, follow the steps outlined below:
 See the repo's [README](../../README.md#bootstrap-module) for how to bootstrap the module.

### Updating the modules' version
-See `README` [Update modules version](../../README.md#update-modules-version)
+See `README` [Update modules version](../../README.md#2-update-modules-version)

### Kubernetes Upgrade:
In order to update Kubernetes we will need to update the `cluster` and the `nodes`.

-1. Update the `k8s-version` variable in the `cluster.tfvars` file.
+1. Set the `eks.k8s_version` variable to the desired version (at most it can be 2 minor versions ahead).
2. Update cluster:
   1. Plan and review the changes:
   ```bash
   ./tf.sh cluster plan
   ```
   2. Apply the changes:
   ```bash
   ./tf.sh cluster apply
   ```
3. Update nodes:
Given that the nodes source the k8s version from `eks`, we just need to plan and apply.
   1. Plan and review the changes:
   ```bash
   ./tf.sh nodes plan
   ```
   2. Apply the changes:
   ```bash
   ./tf.sh nodes apply
   ```

### Nodes Upgrade:
-In order to just update the nodes to the latest AMI for the existing version.
-
-1. Update nodes:
-   1. Plan and review the changes:
-   ```bash
-   ./tf.sh nodes plan
-   ```
-   2. Apply the changes:
-   ```bash
-   ./tf.sh nodes apply
-   ```
Given that the nodes module looks for the latest AMI, we just need to plan and apply:
1. Plan and review the changes:
   ```bash
   ./tf.sh nodes plan
   ```
2. Apply the changes:
   ```bash
   ./tf.sh nodes apply
   ```
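If you want to be certain that exactly the plan you reviewed is the plan that gets applied, either upgrade can also be driven through the saved-plan commands described above. A minimal sketch for the nodes component:

```bash
# Generate and save the plan (written to terraform/nodes-terraform.plan).
./tf.sh nodes plan_out

# After reviewing the plan output, apply that saved plan file.
./tf.sh nodes apply_plan
```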
diff --git a/examples/deploy/set-mod-version.sh b/examples/deploy/set-mod-version.sh
index 75438d25..ec48e21c 100755
--- a/examples/deploy/set-mod-version.sh
+++ b/examples/deploy/set-mod-version.sh
@@ -2,6 +2,11 @@ set -euo pipefail
 validate_mod_version() {
+  [[ -n "${MOD_VALIDATION_OFF:-}" ]] && {
+    echo 'MOD_VALIDATION_OFF is set, skipping module version validation'
+    return
+  }
+
   url="https://api.github.com/repos/dominodatalab/terraform-aws-eks/tags"
   local curl_cmd=("curl" "-s")
@@ -48,7 +53,7 @@ MOD_VERSION="$1"
 [ -z "${MOD_VERSION// /}" ] && { echo "Provide a module version in the format $(vX.X.X), ie $(v3.0.0)" && exit 1; }
 SH_DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
-  source "${SH_DIR}/meta.sh"
+
 validate_mod_version
 set_module_version
diff --git a/examples/deploy/terraform/cluster.tfvars b/examples/deploy/terraform/cluster.tfvars
index f43f3211..6bca19ea 100644
--- a/examples/deploy/terraform/cluster.tfvars
+++ b/examples/deploy/terraform/cluster.tfvars
@@ -1 +1,2 @@
-k8s_version = null
+eks = null
+kms_info = null
diff --git a/examples/deploy/terraform/cluster/README.md b/examples/deploy/terraform/cluster/README.md
index 75f9ede0..c2bccb40 100644
--- a/examples/deploy/terraform/cluster/README.md
+++ b/examples/deploy/terraform/cluster/README.md
@@ -30,7 +30,8 @@
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
-| [k8s\_version](#input\_k8s\_version) | Update k8s version. | `string` | `null` | no |
+| [eks](#input\_eks) | creation\_role\_name = Name of the role to import.
k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
creation_role_name = optional(string, null)
k8s_version = optional(string)
kubeconfig = optional(object({
extra_args = optional(string)
path = optional(string)
}), {})
public_access = optional(object({
enabled = optional(bool)
cidrs = optional(list(string))
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})))
master_role_names = optional(list(string))
cluster_addons = optional(list(string))
ssm_log_group_name = optional(string)
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string)
groups_prefix = optional(string)
identity_provider_config_name = string
issuer_url = optional(string)
required_claims = optional(string)
username_claim = optional(string)
username_prefix = optional(string)
})))
})
| `null` | no | +| [kms\_info](#input\_kms\_info) | Overrides the KMS key information. Meant for migrated configurations.
{
key\_id = KMS key id.
key\_arn = KMS key arn.
enabled = KMS key is enabled.
} |
object({
key_id = string
key_arn = string
enabled = bool
})
| `null` | no | ## Outputs diff --git a/examples/deploy/terraform/cluster/main.tf b/examples/deploy/terraform/cluster/main.tf index 606dc558..3a0164cf 100644 --- a/examples/deploy/terraform/cluster/main.tf +++ b/examples/deploy/terraform/cluster/main.tf @@ -8,10 +8,8 @@ data "terraform_remote_state" "infra" { locals { infra = data.terraform_remote_state.infra.outputs.infra - - eks = var.k8s_version != null ? merge(local.infra.eks, { - k8s_version = var.k8s_version - }) : local.infra.eks + eks = var.eks != null ? var.eks : local.infra.eks + kms = var.kms_info != null ? var.kms_info : local.infra.kms } module "eks" { @@ -24,7 +22,7 @@ module "eks" { efs_security_group = local.infra.efs_security_group eks = local.eks network_info = local.infra.network - kms_info = local.infra.kms + kms_info = local.kms bastion_info = local.infra.bastion create_eks_role_arn = local.infra.create_eks_role_arn tags = local.infra.tags @@ -38,8 +36,6 @@ provider "aws" { } } - - terraform { required_version = ">= 1.4.0" required_providers { diff --git a/examples/deploy/terraform/cluster/variables.tf b/examples/deploy/terraform/cluster/variables.tf index 38f3e912..d643dd67 100644 --- a/examples/deploy/terraform/cluster/variables.tf +++ b/examples/deploy/terraform/cluster/variables.tf @@ -1,7 +1,81 @@ -## Used to overwrite the `k8s_version` provided at initial creation. -## When upgrading k8s, create/modify tfvars with desired `k8s_version` value. -variable "k8s_version" { - description = "Update k8s version." - type = string - default = null +## Used to overwrite the `eks` variable passed through the `infra` outputs. + +variable "eks" { + description = < [bastion](#input\_bastion) | enabled = Create bastion host.
ami = Ami id. Defaults to latest 'amazon\_linux\_2' ami.
instance\_type = Instance type.
authorized\_ssh\_ip\_ranges = List of CIDR ranges permitted for the bastion ssh access.
username = Bastion user.
install\_binaries = Toggle to install required Domino binaries in the bastion. |
object({
enabled = optional(bool)
ami_id = optional(string)
instance_type = optional(string)
authorized_ssh_ip_ranges = optional(list(string))
username = optional(string)
install_binaries = optional(bool)
})
| n/a | yes | | [default\_node\_groups](#input\_default\_node\_groups) | EKS managed node groups definition. |
object(
{
compute = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["m5.2xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 0)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 0)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "default"
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 1000)
type = optional(string, "gp3")
}), {
size = 1000
type = "gp3"
}
)
}),
platform = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["m5.2xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 1)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 1)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "platform"
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 100)
type = optional(string, "gp3")
}), {
size = 100
type = "gp3"
}
)
}),
gpu = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["g4dn.xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 0)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 0)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "default-gpu"
"nvidia.com/gpu" = true
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [{
key = "nvidia.com/gpu"
value = "true"
effect = "NO_SCHEDULE"
}
])
tags = optional(map(string))
gpu = optional(bool)
volume = optional(object({
size = optional(number)
type = optional(string)
}))
})
})
| n/a | yes | | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID. | `string` | n/a | yes | -| [eks](#input\_eks) | k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
"Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
k8s_version = optional(string)
kubeconfig = optional(object({
extra_args = optional(string)
path = optional(string)
}), {})
public_access = optional(object({
enabled = optional(bool)
cidrs = optional(list(string))
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})))
master_role_names = optional(list(string))
cluster_addons = optional(list(string))
ssm_log_group_name = optional(string)
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string)
groups_prefix = optional(string)
identity_provider_config_name = string
issuer_url = optional(string)
required_claims = optional(string)
username_claim = optional(string)
username_prefix = optional(string)
})))
})
| `{}` | no | +| [eks](#input\_eks) | creation\_role\_name = Name of the role to import.
k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
creation_role_name = optional(string, null)
k8s_version = optional(string)
kubeconfig = optional(object({
extra_args = optional(string)
path = optional(string)
}), {})
public_access = optional(object({
enabled = optional(bool)
cidrs = optional(list(string))
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})))
master_role_names = optional(list(string))
cluster_addons = optional(list(string))
ssm_log_group_name = optional(string)
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string)
groups_prefix = optional(string)
identity_provider_config_name = string
issuer_url = optional(string)
required_claims = optional(string)
username_claim = optional(string)
username_prefix = optional(string)
})))
})
| `{}` | no |
| [kms](#input\_kms) | enabled = Toggle, if set, use either the specified KMS key\_id or a Domino-generated one.
key\_id = optional(string, null) |
object({
enabled = optional(bool)
key_id = optional(string)
})
| n/a | yes | +| [network](#input\_network) | vpc = {
id = Existing vpc id, it will bypass creation by this module.
subnets = {
private = Existing private subnets.
public = Existing public subnets.
pod = Existing pod subnets.
}), {})
}), {})
network\_bits = {
public = Number of network bits to allocate to the public subnet. i.e /27 -> 32 IPs.
private = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
pod = Number of network bits to allocate to the pod subnet. i.e /19 -> 8,192 IPs.
}
cidrs = {
vpc = The IPv4 CIDR block for the VPC.
pod = The IPv4 CIDR block for the Pod subnets.
}
use\_pod\_cidr = Use additional pod CIDR range (ie 100.64.0.0/16) for pod networking. |
object({
vpc = optional(object({
id = optional(string, null)
subnets = optional(object({
private = optional(list(string), [])
public = optional(list(string), [])
pod = optional(list(string), [])
}), {})
}), {})
network_bits = optional(object({
public = optional(number, 27)
private = optional(number, 19)
pod = optional(number, 19)
}
), {})
cidrs = optional(object({
vpc = optional(string, "10.0.0.0/16")
pod = optional(string, "100.64.0.0/16")
}), {})
use_pod_cidr = optional(bool, true)
})
| `{}` | no | | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | | [route53\_hosted\_zone\_name](#input\_route53\_hosted\_zone\_name) | Optional hosted zone for External DNS zone. | `string` | `null` | no | | [ssh\_pvt\_key\_path](#input\_ssh\_pvt\_key\_path) | SSH private key filepath. | `string` | n/a | yes | +| [storage](#input\_storage) | storage = {
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
s3 = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false', terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the ECR repositories. If 'false', terraform will NOT be able to delete non-empty repositories.
}
}
} |
object({
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
s3 = optional(object({
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
force_destroy_on_deletion = optional(bool, true)
}), {})
})
| `{}` | no | | [tags](#input\_tags) | Deployment tags. | `map(string)` | n/a | yes | ## Outputs diff --git a/examples/deploy/terraform/infra/main.tf b/examples/deploy/terraform/infra/main.tf index 4c3db98f..ee00c585 100644 --- a/examples/deploy/terraform/infra/main.tf +++ b/examples/deploy/terraform/infra/main.tf @@ -6,8 +6,10 @@ module "infra" { bastion = var.bastion default_node_groups = var.default_node_groups + network = var.network eks = var.eks kms = var.kms + storage = var.storage region = var.region route53_hosted_zone_name = var.route53_hosted_zone_name ssh_pvt_key_path = var.ssh_pvt_key_path diff --git a/examples/deploy/terraform/infra/variables.tf b/examples/deploy/terraform/infra/variables.tf index 9ced1952..afb1b520 100644 --- a/examples/deploy/terraform/infra/variables.tf +++ b/examples/deploy/terraform/infra/variables.tf @@ -13,6 +13,52 @@ variable "tags" { type = map(string) } +variable "network" { + description = < 32 IPs. + private = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs. + pod = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs. + } + cidrs = { + vpc = The IPv4 CIDR block for the VPC. + pod = The IPv4 CIDR block for the Pod subnets. + } + use_pod_cidr = Use additional pod CIDR range (ie 100.64.0.0/16) for pod networking. + EOF + + type = object({ + vpc = optional(object({ + id = optional(string, null) + subnets = optional(object({ + private = optional(list(string), []) + public = optional(list(string), []) + pod = optional(list(string), []) + }), {}) + }), {}) + network_bits = optional(object({ + public = optional(number, 27) + private = optional(number, 19) + pod = optional(number, 19) + } + ), {}) + cidrs = optional(object({ + vpc = optional(string, "10.0.0.0/16") + pod = optional(string, "100.64.0.0/16") + }), {}) + use_pod_cidr = optional(bool, true) + }) + + default = {} +} variable "default_node_groups" { description = "EKS managed node groups definition." @@ -138,6 +184,54 @@ variable "additional_node_groups" { default = {} } +variable "storage" { + description = < " - echo "Components: infra, cluster, nodes, all" - echo "Supported commands: init, plan, apply, destroy, output" + + echo -e "\nComponents:" + echo -e " infra \tManage the infrastructure components." + echo -e " cluster \tManage the cluster components." + echo -e " nodes \tManage the node components." + echo -e " all \tManage all components." + echo "Note: If an unlisted component is provided, the script will attempt to execute the given command, assuming the corresponding directory is properly configured." + + echo -e "\nCommands:" + echo -e " init \tInitialize the working directory." + echo -e " validate \tCheck the syntax and validate the configuration." + echo -e " plan \tGenerate an execution plan." + echo -e " plan_out \tGenerate a plan and save it to a file." + echo -e " apply \tExecute the actions proposed in the Terraform plan." + echo -e " refresh \tUpdate local state with remote resources." + echo -e " destroy \tDestroy the Terraform-managed infrastructure." + echo -e " output \tDisplay outputs from the Terraform state." 
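+  # apply_plan applies the plan file previously saved by plan_out (documented in the deploy README).
+  echo -e "  apply_plan \tApply the plan file generated by plan_out."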
+ exit 1 fi @@ -104,14 +152,8 @@ component=$1 command=$2 case $component in -infra) - run_tf_command "${BASE_TF_DIR}/infra" "$command" - ;; -cluster) - run_tf_command "${BASE_TF_DIR}/cluster" "$command" - ;; -nodes) - run_tf_command "${BASE_TF_DIR}/nodes" "$command" +infra | cluster | nodes) + run_tf_command "${BASE_TF_DIR}/${component}" "$command" ;; all) if [[ "$command" == "destroy" ]]; then @@ -125,8 +167,19 @@ all) fi ;; *) - echo "Unknown component: $component" - echo "Available components: infra, cluster, nodes, all" - exit 1 + echo "Default components: infra, cluster, nodes, all" + if [[ -d "${BASE_TF_DIR}/${component}" ]]; then + if ls "${BASE_TF_DIR}/${component}"/*.tf 1>/dev/null 2>&1; then + echo "Running command $command on non-default component: $component" + echo "Note: Component: all does not include $component" + run_tf_command "${BASE_TF_DIR}/${component}" "$command" + else + echo "Directory exists but no .tf files found in ${BASE_TF_DIR}/${component}" + exit 1 + fi + else + echo "Component: $component Not supported." + exit 1 + fi ;; esac diff --git a/modules/eks/README.md b/modules/eks/README.md index 4e54785b..99eaa502 100644 --- a/modules/eks/README.md +++ b/modules/eks/README.md @@ -72,7 +72,7 @@ | [create\_eks\_role\_arn](#input\_create\_eks\_role\_arn) | Role arn to assume during the EKS cluster creation. | `string` | n/a | yes | | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID | `string` | n/a | yes | | [efs\_security\_group](#input\_efs\_security\_group) | Security Group ID for EFS | `string` | n/a | yes | -| [eks](#input\_eks) | k8s\_version = "EKS cluster k8s version."
kubeconfig = {
extra\_args = "Optional extra args when generating kubeconfig."
path = "Fully qualified path name to write the kubeconfig file."
}
public\_access = {
enabled = "Enable EKS API public endpoint."
cidrs = "List of CIDR ranges permitted for accessing the EKS public endpoint."
}
"Custom role maps for aws auth configmap"
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = "IAM role names to be added as masters in eks."
cluster\_addons = "EKS cluster addons. vpc-cni is installed separately."
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = "CloudWatch log group to send the SSM session logs to."
identity\_providers = "Configuration for IDP(Identity Provider)."
} |
object({
k8s_version = optional(string, "1.27")
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), [])
})
| `{}` | no | +| [eks](#input\_eks) | creation\_role\_name = Name of the role to import.
k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
creation_role_name = optional(string, null)
k8s_version = optional(string, "1.27")
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), [])
})
| `{}` | no | | [kms\_info](#input\_kms\_info) | key\_id = KMS key id.
key\_arn = KMS key arn.
enabled = KMS key is enabled |
object({
key_id = string
key_arn = string
enabled = bool
})
| n/a | yes | | [network\_info](#input\_network\_info) | id = VPC ID.
subnets = {
public = List of public Subnets.
[{
name = Subnet name.
subnet\_id = Subnet ud
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
private = List of private Subnets.
[{
name = Subnet name.
subnet\_id = Subnet ud
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
pod = List of pod Subnets.
[{
name = Subnet name.
subnet\_id = Subnet ud
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
} |
object({
vpc_id = string
subnets = object({
public = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
private = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
pod = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
})
})
| n/a | yes | | [node\_iam\_policies](#input\_node\_iam\_policies) | Additional IAM Policy Arns for Nodes | `list(string)` | n/a | yes | diff --git a/modules/eks/variables.tf b/modules/eks/variables.tf index d3211821..cbb0e153 100644 --- a/modules/eks/variables.tf +++ b/modules/eks/variables.tf @@ -119,31 +119,33 @@ variable "kms_info" { variable "eks" { description = < [bastion](#input\_bastion) | enabled = Create bastion host.
ami = Ami id. Defaults to latest 'amazon\_linux\_2' ami.
instance\_type = Instance type.
authorized\_ssh\_ip\_ranges = List of CIDR ranges permitted for the bastion ssh access.
username = Bastion user.
install\_binaries = Toggle to install required Domino binaries in the bastion. |
object({
enabled = optional(bool, true)
ami_id = optional(string, null) # default will use the latest 'amazon_linux_2' ami
instance_type = optional(string, "t3.micro")
authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])
username = optional(string, "ec2-user")
install_binaries = optional(bool, false)
})
| `{}` | no | | [default\_node\_groups](#input\_default\_node\_groups) | EKS managed node groups definition. |
object(
{
compute = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["m5.2xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 0)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 0)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "default"
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 1000)
type = optional(string, "gp3")
}), {
size = 1000
type = "gp3"
}
)
}),
platform = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["m5.2xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 1)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 1)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "platform"
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 100)
type = optional(string, "gp3")
}), {
size = 100
type = "gp3"
}
)
}),
gpu = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["g4dn.xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 0)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 0)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "default-gpu"
"nvidia.com/gpu" = true
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [{
key = "nvidia.com/gpu"
value = "true"
effect = "NO_SCHEDULE"
}
])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 1000)
type = optional(string, "gp3")
}), {
size = 1000
type = "gp3"
}
)
})
})
| n/a | yes | | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID. | `string` | `"domino-eks"` | no | -| [eks](#input\_eks) | k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
"Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
k8s_version = optional(string, "1.27")
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), [])
})
| `{}` | no | +| [eks](#input\_eks) | creation\_role\_name = Name of the role to import.
k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
creation_role_name = optional(string, null)
k8s_version = optional(string, "1.27")
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), [])
})
| `{}` | no | | [kms](#input\_kms) | enabled = Toggle,if set use either the specified KMS key\_id or a Domino-generated one.
key\_id = optional(string, null) |
object({
enabled = optional(bool, true)
key_id = optional(string, null)
})
| `{}` | no | | [network](#input\_network) | vpc = {
id = Existing vpc id, it will bypass creation by this module.
subnets = {
private = Existing private subnets.
public = Existing public subnets.
pod = Existing pod subnets.
}), {})
}), {})
network\_bits = {
public = Number of network bits to allocate to the public subnet. i.e /27 -> 32 IPs.
private = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
pod = Number of network bits to allocate to the pod subnet. i.e /19 -> 8,192 IPs.
}
cidrs = {
vpc = The IPv4 CIDR block for the VPC.
pod = The IPv4 CIDR block for the Pod subnets.
}
use\_pod\_cidr = Use additional pod CIDR range (ie 100.64.0.0/16) for pod networking. |
object({
vpc = optional(object({
id = optional(string, null)
subnets = optional(object({
private = optional(list(string), [])
public = optional(list(string), [])
pod = optional(list(string), [])
}), {})
}), {})
network_bits = optional(object({
public = optional(number, 27)
private = optional(number, 19)
pod = optional(number, 19)
}
), {})
cidrs = optional(object({
vpc = optional(string, "10.0.0.0/16")
pod = optional(string, "100.64.0.0/16")
}), {})
use_pod_cidr = optional(bool, true)
})
| `{}` | no | | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | diff --git a/modules/infra/iam.tf b/modules/infra/iam.tf index f6fae324..ba149ebb 100644 --- a/modules/infra/iam.tf +++ b/modules/infra/iam.tf @@ -35,7 +35,7 @@ resource "aws_iam_policy" "route53" { locals { - create_eks_role_name = "${var.deploy_id}-create-eks" + create_eks_role_name = coalesce(var.eks.creation_role_name, "${var.deploy_id}-create-eks") } data "aws_iam_policy_document" "create_eks_role" { @@ -81,7 +81,6 @@ data "aws_iam_policy_document" "create_eks_role" { resources = ["arn:${data.aws_partition.current.partition}:iam::${local.aws_account_id}:role/aws-service-role/*"] effect = "Allow" } - } resource "aws_iam_policy" "create_eks_role" { diff --git a/modules/infra/submodules/network/main.tf b/modules/infra/submodules/network/main.tf index dc49e81a..15c8b2a9 100644 --- a/modules/infra/submodules/network/main.tf +++ b/modules/infra/submodules/network/main.tf @@ -23,7 +23,6 @@ locals { az_ids = local.provided_vpc ? distinct(data.aws_subnet.private[*].availability_zone_id) : distinct(flatten([for name, ng in var.node_groups : ng.availability_zone_ids])) num_of_azs = length(local.az_ids) - ## Calculating public and private subnets based on the base base cidr and desired network bits base_cidr_network_bits = tonumber(regex("[^/]*$", var.network.cidrs.vpc)) ## We have one Cidr to carve the nw bits for both pvt and public subnets diff --git a/modules/infra/variables.tf b/modules/infra/variables.tf index 3678e38e..243d80c7 100644 --- a/modules/infra/variables.tf +++ b/modules/infra/variables.tf @@ -53,6 +53,7 @@ variable "ssh_pvt_key_path" { variable "eks" { description = < [bastion](#input\_bastion) | enabled = Create bastion host.
ami = Ami id. Defaults to latest 'amazon\_linux\_2' ami.
instance\_type = Instance type.
authorized\_ssh\_ip\_ranges = List of CIDR ranges permitted for the bastion ssh access.
username = Bastion user.
install\_binaries = Toggle to install required Domino binaries in the bastion. |
object({
enabled = optional(bool, true)
ami_id = optional(string, null) # default will use the latest 'amazon_linux_2' ami
instance_type = optional(string, "t3.micro")
authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])
username = optional(string, "ec2-user")
install_binaries = optional(bool, false)
})
| `{}` | no | | [default\_node\_groups](#input\_default\_node\_groups) | EKS managed node groups definition. |
object(
{
compute = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["m5.2xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 0)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 0)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "default"
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 1000)
type = optional(string, "gp3")
}), {
size = 1000
type = "gp3"
}
)
}),
platform = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["m5.2xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 1)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 1)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "platform"
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 100)
type = optional(string, "gp3")
}), {
size = 100
type = "gp3"
}
)
}),
gpu = object(
{
ami = optional(string, null)
bootstrap_extra_args = optional(string, "")
instance_types = optional(list(string), ["g4dn.xlarge"])
spot = optional(bool, false)
min_per_az = optional(number, 0)
max_per_az = optional(number, 10)
desired_per_az = optional(number, 0)
availability_zone_ids = list(string)
labels = optional(map(string), {
"dominodatalab.com/node-pool" = "default-gpu"
"nvidia.com/gpu" = true
})
taints = optional(list(object({
key = string
value = optional(string)
effect = string
})), [{
key = "nvidia.com/gpu"
value = "true"
effect = "NO_SCHEDULE"
}
])
tags = optional(map(string), {})
gpu = optional(bool, null)
volume = optional(object({
size = optional(number, 1000)
type = optional(string, "gp3")
}), {
size = 1000
type = "gp3"
}
)
})
})
| n/a | yes | | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID. | `string` | `"domino-eks"` | no | -| [eks](#input\_eks) | k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
"Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
k8s_version = optional(string, "1.27")
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), [])
})
| `{}` | no | +| [eks](#input\_eks) | k8s\_version = EKS cluster k8s version.
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP(Identity Provider).
} |
object({
k8s_version = optional(string, "1.27")
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), [])
})
| `{}` | no | | [enable\_private\_link](#input\_enable\_private\_link) | Enable Private Link connections | `bool` | `false` | no | | [kms](#input\_kms) | enabled = Toggle,if set use either the specified KMS key\_id or a Domino-generated one.
key\_id = optional(string, null) |
object({
enabled = optional(bool, true)
key_id = optional(string, null)
})
| `{}` | no | | [network](#input\_network) | vpc = {
id = Existing vpc id, it will bypass creation by this module.
subnets = {
private = Existing private subnets.
public = Existing public subnets.
pod = Existing pod subnets.
}), {})
}), {})
network\_bits = {
public = Number of network bits to allocate to the public subnet. i.e /27 -> 32 IPs.
private = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
pod = Number of network bits to allocate to the pod subnet. i.e /19 -> 8,192 IPs.
}
cidrs = {
vpc = The IPv4 CIDR block for the VPC.
pod = The IPv4 CIDR block for the Pod subnets.
}
use\_pod\_cidr = Use additional pod CIDR range (ie 100.64.0.0/16) for pod networking. |
object({
vpc = optional(object({
id = optional(string, null)
subnets = optional(object({
private = optional(list(string), [])
public = optional(list(string), [])
pod = optional(list(string), [])
}), {})
}), {})
network_bits = optional(object({
public = optional(number, 27)
private = optional(number, 19)
pod = optional(number, 19)
}
), {})
cidrs = optional(object({
vpc = optional(string, "10.0.0.0/16")
pod = optional(string, "100.64.0.0/16")
}), {})
use_pod_cidr = optional(bool, true)
})
| `{}` | no | diff --git a/tests/plan/terraform/variables.tf b/tests/plan/terraform/variables.tf index bb16619e..8283c806 100644 --- a/tests/plan/terraform/variables.tf +++ b/tests/plan/terraform/variables.tf @@ -62,7 +62,7 @@ variable "eks" { enabled = Enable EKS API public endpoint. cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint. } - "Custom role maps for aws auth configmap + Custom role maps for aws auth configmap custom_role_maps = { rolearn = string username = string