diff --git a/.circleci/config.yml b/.circleci/config.yml
index 69ae9137..eee2e31c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -47,13 +47,20 @@ commands:
environment:
HCLEDIT_VERSION: << parameters.hcledit_version >>
command: bash ci-deploy.sh install_hcledit
- set_current_mod_source:
+ set_mod_source_current:
description: "Set up module source to current branch."
steps:
- run:
name: "Set module source to current branch"
working_directory: tests/deploy
command: bash ci-deploy.sh set_mod_src_circle_branch
+ set_mod_source_latest_rel:
+ description: "Set up module source to current branch."
+ steps:
+ - run:
+ name: "Set module source to latest published release"
+ working_directory: tests/deploy
+ command: bash ci-deploy.sh set_mod_src_latest_rel
set_aws_creds:
description: "Sets short-lived creds"
steps:
@@ -72,7 +79,6 @@ commands:
command: |
bash ci-deploy.sh setup_modules
bash ci-deploy.sh set_tf_vars
- - set_current_mod_source
install_helm:
description: "Install Helm"
parameters:
@@ -120,6 +126,24 @@ commands:
echo "Running ci-deploy.sh deploy"
bash ci-deploy.sh deploy
fi
+ tf_deploy_single_node:
+ description: "Terraform deploy single-node"
+ steps:
+ - run:
+ name: "Setup single-node module"
+ working_directory: tests/deploy
+ command: bash ci-deploy.sh setup_single_node_tf
+ - run:
+ name: "Deploy single-node"
+ working_directory: tests/deploy
+ command: bash ci-deploy.sh deploy_single_node
+ tf_destroy_single_node:
+ description: "Terraform destroy single-node"
+ steps:
+ - run:
+ name: "Destroy single-node"
+ working_directory: tests/deploy
+ command: bash ci-deploy.sh destroy_single_node
tf_deploy:
description: "Terraform deploy"
steps:
@@ -169,7 +193,10 @@ jobs:
- install_helm:
helm_version: << parameters.helm_version >>
- set_tf_vars
+ - set_mod_source_current
- tf_deploy
+ - tf_deploy_single_node
+ - tf_destroy_single_node
- tf_destroy
test-upgrade:
docker:
@@ -187,14 +214,10 @@ jobs:
terraform_version: << parameters.terraform_version >>
- install_helm:
helm_version: << parameters.helm_version >>
- - install_hcledit
- set_tf_vars
- - run:
- name: "Set module source to latest published release"
- working_directory: tests/deploy
- command: bash ci-deploy.sh set_mod_src_latest_rel
+ - set_mod_source_latest_rel
- tf_deploy
- - set_current_mod_source
+ - set_mod_source_current
- tf_init_apply
- tf_destroy
diff --git a/README.md b/README.md
index 07ec801d..78f403f2 100644
--- a/README.md
+++ b/README.md
@@ -153,7 +153,6 @@ Configure terraform variables at:
* `domino-deploy/terraform/nodes.tfvars`
**NOTE**: The `eks` configuration is required in both the `infra` and `cluster` modules because the Kubernetes version is used for installing the `kubectl` binary on the bastion host. Similarly, `default_node_groups` and `additional_node_groups` must be defined in both the `infra` and `nodes` modules, as the `availability zones` for the `nodes` are necessary for setting up the network infrastructure.
-The `eks` module will source its information from the `infra` outputs if it is not configured on `cluster.tfvars`, as will the `nodes` module if the variables are not configured on `nodes.tfvars`. We recommended setting the variables in `eks` and `nodes` from the beggining as future kubernetes upgrades will be driven from `cluster.tfvars` and `nodes.tfvars`.
### 4. Create SSH Key pair
diff --git a/examples/deploy/terraform/cluster.tfvars b/examples/deploy/terraform/cluster.tfvars
index 6bca19ea..4f9184dd 100644
--- a/examples/deploy/terraform/cluster.tfvars
+++ b/examples/deploy/terraform/cluster.tfvars
@@ -1,2 +1,19 @@
-eks = null
+eks = {
+ cluster_addons = null
+ creation_role_name = null
+ custom_role_maps = null
+ identity_providers = null
+ k8s_version = "1.27"
+ kubeconfig = {
+ extra_args = null
+ path = null
+ }
+ master_role_names = null
+ public_access = {
+ cidrs = null
+ enabled = null
+ }
+ ssm_log_group_name = null
+ vpc_cni = null
+}
kms_info = null
diff --git a/examples/deploy/terraform/cluster/README.md b/examples/deploy/terraform/cluster/README.md
index d4d25891..2af8f051 100644
--- a/examples/deploy/terraform/cluster/README.md
+++ b/examples/deploy/terraform/cluster/README.md
@@ -30,7 +30,7 @@
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
-| [eks](#input\_eks) | creation\_role\_name = Name of the role to import.<br>k8s\_version = EKS cluster k8s version.<br>kubeconfig = {<br>extra\_args = Optional extra args when generating kubeconfig.<br>path = Fully qualified path name to write the kubeconfig file.<br>}<br>public\_access = {<br>enabled = Enable EKS API public endpoint.<br>cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.<br>}<br>Custom role maps for aws auth configmap<br>custom\_role\_maps = {<br>rolearn = string<br>username = string<br>groups = list(string)<br>}<br>master\_role\_names = IAM role names to be added as masters in eks.<br>cluster\_addons = EKS cluster addons. vpc-cni is installed separately.<br>vpc\_cni = Configuration for AWS VPC CNI<br>ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.<br>identity\_providers = Configuration for IDP(Identity Provider).<br>} | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string)<br>kubeconfig = optional(object({<br>extra_args = optional(string)<br>path = optional(string)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool)<br>cidrs = optional(list(string))<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})))<br>master_role_names = optional(list(string))<br>cluster_addons = optional(list(string))<br>ssm_log_group_name = optional(string)<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string)<br>groups_prefix = optional(string)<br>identity_provider_config_name = string<br>issuer_url = optional(string)<br>required_claims = optional(string)<br>username_claim = optional(string)<br>username_prefix = optional(string)<br>})))<br>})</pre> | `null` | no |
+| [eks](#input\_eks) | creation\_role\_name = Name of the role to import.<br>k8s\_version = EKS cluster k8s version.<br>kubeconfig = {<br>extra\_args = Optional extra args when generating kubeconfig.<br>path = Fully qualified path name to write the kubeconfig file.<br>}<br>public\_access = {<br>enabled = Enable EKS API public endpoint.<br>cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.<br>}<br>Custom role maps for aws auth configmap<br>custom\_role\_maps = {<br>rolearn = string<br>username = string<br>groups = list(string)<br>}<br>master\_role\_names = IAM role names to be added as masters in eks.<br>cluster\_addons = EKS cluster addons. vpc-cni is installed separately.<br>vpc\_cni = Configuration for AWS VPC CNI<br>ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.<br>identity\_providers = Configuration for IDP(Identity Provider).<br>} | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string)<br>kubeconfig = optional(object({<br>extra_args = optional(string)<br>path = optional(string)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool)<br>cidrs = optional(list(string))<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})))<br>master_role_names = optional(list(string))<br>cluster_addons = optional(list(string))<br>ssm_log_group_name = optional(string)<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string)<br>groups_prefix = optional(string)<br>identity_provider_config_name = string<br>issuer_url = optional(string)<br>required_claims = optional(string)<br>username_claim = optional(string)<br>username_prefix = optional(string)<br>})))<br>})</pre> | `{}` | no |
 | [kms\_info](#input\_kms\_info) | Overrides the KMS key information. Meant for migrated configurations. | <pre>object({<br>key_id = string<br>key_arn = string<br>enabled = bool<br>})</pre> | `null` | no |
 
 ## Outputs
diff --git a/examples/deploy/terraform/cluster/main.tf b/examples/deploy/terraform/cluster/main.tf
index 3a0164cf..ea6ef3a5 100644
--- a/examples/deploy/terraform/cluster/main.tf
+++ b/examples/deploy/terraform/cluster/main.tf
@@ -8,7 +8,6 @@ data "terraform_remote_state" "infra" {
 
 locals {
   infra = data.terraform_remote_state.infra.outputs.infra
-  eks   = var.eks != null ? var.eks : local.infra.eks
   kms   = var.kms_info != null ? var.kms_info : local.infra.kms
 }
 
@@ -20,7 +19,7 @@ module "eks" {
   ssh_key            = local.infra.ssh_key
   node_iam_policies  = local.infra.node_iam_policies
   efs_security_group = local.infra.efs_security_group
-  eks                = local.eks
+  eks                = var.eks
   network_info       = local.infra.network
   kms_info           = local.kms
   bastion_info       = local.infra.bastion
diff --git a/examples/deploy/terraform/cluster/variables.tf b/examples/deploy/terraform/cluster/variables.tf
index 361c8691..89dcc060 100644
--- a/examples/deploy/terraform/cluster/variables.tf
+++ b/examples/deploy/terraform/cluster/variables.tf
@@ -61,7 +61,7 @@ variable "eks" {
     })))
   })
 
-  default = null
+  default = {}
 }
 
 variable "kms_info" {
diff --git a/examples/deploy/terraform/infra.tfvars b/examples/deploy/terraform/infra.tfvars
index 6d8bfe62..c9a985c7 100644
--- a/examples/deploy/terraform/infra.tfvars
+++ b/examples/deploy/terraform/infra.tfvars
@@ -13,7 +13,7 @@ default_node_groups = {
     availability_zone_ids = ["usw2-az1", "usw2-az2"]
   }
 }
-deploy_id = "dominoeks001"
+deploy_id = "dominoeks003"
 eks = {
   cluster_addons     = null
   creation_role_name = null
diff --git a/examples/deploy/terraform/infra/README.md b/examples/deploy/terraform/infra/README.md
index 9eafabff..d11e33ab 100644
--- a/examples/deploy/terraform/infra/README.md
+++ b/examples/deploy/terraform/infra/README.md
@@ -30,7 +30,7 @@
 | [bastion](#input\_bastion) | enabled = Create bastion host. | <pre>object({<br>enabled = optional(bool)<br>ami_id = optional(string)<br>instance_type = optional(string)<br>authorized_ssh_ip_ranges = optional(list(string))<br>username = optional(string)<br>install_binaries = optional(bool)<br>})</pre> | n/a | yes |
 | [default\_node\_groups](#input\_default\_node\_groups) | EKS managed node groups definition. | <pre>object(<br>{<br>compute = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["m5.2xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 0)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 0)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "default"<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 1000)<br>type = optional(string, "gp3")<br>}), {<br>size = 1000<br>type = "gp3"<br>}<br>)<br>}),<br>platform = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["m5.2xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 1)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 1)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "platform"<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 100)<br>type = optional(string, "gp3")<br>}), {<br>size = 100<br>type = "gp3"<br>}<br>)<br>}),<br>gpu = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["g4dn.xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 0)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 0)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "default-gpu"<br>"nvidia.com/gpu" = true<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [{<br>key = "nvidia.com/gpu"<br>value = "true"<br>effect = "NO_SCHEDULE"<br>}<br>])<br>tags = optional(map(string))<br>gpu = optional(bool)<br>volume = optional(object({<br>size = optional(number)<br>type = optional(string)<br>}))<br>})<br>})</pre> | n/a | yes |
 | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID. | `string` | n/a | yes |
-| [eks](#input\_eks) | creation\_role\_name = Name of the role to import. | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string)<br>kubeconfig = optional(object({<br>extra_args = optional(string)<br>path = optional(string)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool)<br>cidrs = optional(list(string))<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})))<br>master_role_names = optional(list(string))<br>cluster_addons = optional(list(string))<br>ssm_log_group_name = optional(string)<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string)<br>groups_prefix = optional(string)<br>identity_provider_config_name = string<br>issuer_url = optional(string)<br>required_claims = optional(string)<br>username_claim = optional(string)<br>username_prefix = optional(string)<br>})))<br>})</pre> | `{}` | no |
+| [eks](#input\_eks) | creation\_role\_name = Name of the role to import. | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string)<br>nodes_master = optional(bool, false)<br>kubeconfig = optional(object({<br>extra_args = optional(string)<br>path = optional(string)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool)<br>cidrs = optional(list(string))<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})))<br>master_role_names = optional(list(string))<br>cluster_addons = optional(list(string))<br>ssm_log_group_name = optional(string)<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string)<br>groups_prefix = optional(string)<br>identity_provider_config_name = string<br>issuer_url = optional(string)<br>required_claims = optional(string)<br>username_claim = optional(string)<br>username_prefix = optional(string)<br>})))<br>})</pre> | `{}` | no |
 | [kms](#input\_kms) | enabled = Toggle,if set use either the specified KMS key\_id or a Domino-generated one. | <pre>object({<br>enabled = optional(bool)<br>key_id = optional(string)<br>})</pre> | n/a | yes |
 | [network](#input\_network) | vpc = { | <pre>object({<br>vpc = optional(object({<br>id = optional(string, null)<br>subnets = optional(object({<br>private = optional(list(string), [])<br>public = optional(list(string), [])<br>pod = optional(list(string), [])<br>}), {})<br>}), {})<br>network_bits = optional(object({<br>public = optional(number, 27)<br>private = optional(number, 19)<br>pod = optional(number, 19)<br>}<br>), {})<br>cidrs = optional(object({<br>vpc = optional(string, "10.0.0.0/16")<br>pod = optional(string, "100.64.0.0/16")<br>}), {})<br>use_pod_cidr = optional(bool, true)<br>})</pre> | `{}` | no |
 | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes |
diff --git a/examples/deploy/terraform/infra/variables.tf b/examples/deploy/terraform/infra/variables.tf
index 4d531098..9ace7e51 100644
--- a/examples/deploy/terraform/infra/variables.tf
+++ b/examples/deploy/terraform/infra/variables.tf
@@ -249,6 +249,7 @@ variable "eks" {
   description = <<EOF
diff --git a/modules/eks/README.md b/modules/eks/README.md
--- a/modules/eks/README.md
+++ b/modules/eks/README.md
-| [eks](#input\_eks) | creation\_role\_name = Name of the role to import. | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string, "1.27")<br>kubeconfig = optional(object({<br>extra_args = optional(string, "")<br>path = optional(string, null)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool, false)<br>cidrs = optional(list(string), [])<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})), [])<br>master_role_names = optional(list(string), [])<br>cluster_addons = optional(list(string), ["kube-proxy", "coredns"])<br>ssm_log_group_name = optional(string, "session-manager")<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool, false)<br>annotate_pod_ip = optional(bool, true)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string, null)<br>groups_prefix = optional(string, null)<br>identity_provider_config_name = string<br>issuer_url = optional(string, null)<br>required_claims = optional(string, null)<br>username_claim = optional(string, null)<br>username_prefix = optional(string, null)<br>})), [])<br>})</pre> | `{}` | no |
+| [eks](#input\_eks) | creation\_role\_name = Name of the role to import. | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string, "1.27")<br>nodes_master = optional(bool, false)<br>kubeconfig = optional(object({<br>extra_args = optional(string, "")<br>path = optional(string, null)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool, false)<br>cidrs = optional(list(string), [])<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})), [])<br>master_role_names = optional(list(string), [])<br>cluster_addons = optional(list(string), ["kube-proxy", "coredns"])<br>ssm_log_group_name = optional(string, "session-manager")<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool, false)<br>annotate_pod_ip = optional(bool, true)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string, null)<br>groups_prefix = optional(string, null)<br>identity_provider_config_name = string<br>issuer_url = optional(string, null)<br>required_claims = optional(string, null)<br>username_claim = optional(string, null)<br>username_prefix = optional(string, null)<br>})), [])<br>})</pre> | `{}` | no |
 | [kms\_info](#input\_kms\_info) | key\_id = KMS key id. | <pre>object({<br>key_id = string<br>key_arn = string<br>enabled = bool<br>})</pre> | n/a | yes |
 | [network\_info](#input\_network\_info) | id = VPC ID. | <pre>object({<br>vpc_id = string<br>subnets = object({<br>public = list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>}))<br>private = list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>}))<br>pod = list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>}))<br>})<br>})</pre> | n/a | yes |
 | [node\_iam\_policies](#input\_node\_iam\_policies) | Additional IAM Policy Arns for Nodes | `list(string)` | n/a | yes |
diff --git a/modules/eks/main.tf b/modules/eks/main.tf
index c3a37495..0ecb6f40 100644
--- a/modules/eks/main.tf
+++ b/modules/eks/main.tf
@@ -213,6 +213,7 @@ locals {
     }
   }
   nodes = {
+    nodes_master      = var.eks.nodes_master
     security_group_id = aws_security_group.eks_nodes.id
     roles = [{
       arn = aws_iam_role.eks_nodes.arn
diff --git a/modules/eks/submodules/k8s/README.md b/modules/eks/submodules/k8s/README.md
index e164a26f..2f6743a3 100644
--- a/modules/eks/submodules/k8s/README.md
+++ b/modules/eks/submodules/k8s/README.md
@@ -34,7 +34,7 @@
 |------|-------------|------|---------|:--------:|
 | [bastion\_info](#input\_bastion\_info) | user = Bastion username. | <pre>object({<br>user = string<br>public_ip = string<br>security_group_id = string<br>ssh_bastion_command = string<br>})</pre> | n/a | yes |
 | [calico\_version](#input\_calico\_version) | Calico operator version. | `string` | `"v3.25.0"` | no |
-| [eks\_info](#input\_eks\_info) | cluster = { | <pre>object({<br>cluster = object({<br>version = string<br>arn = string<br>security_group_id = string<br>endpoint = string<br>roles = list(object({<br>name = string<br>arn = string<br>}))<br>custom_roles = list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>}))<br>oidc = object({<br>arn = string<br>url = string<br>})<br>})<br>nodes = object({<br>security_group_id = string<br>roles = list(object({<br>name = string<br>arn = string<br>}))<br>})<br>kubeconfig = object({<br>path = string<br>extra_args = string<br>})<br>})</pre> | n/a | yes |
+| [eks\_info](#input\_eks\_info) | cluster = { | <pre>object({<br>cluster = object({<br>version = string<br>arn = string<br>security_group_id = string<br>endpoint = string<br>roles = list(object({<br>name = string<br>arn = string<br>}))<br>custom_roles = list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>}))<br>oidc = object({<br>arn = string<br>url = string<br>})<br>})<br>nodes = object({<br>nodes_master = bool<br>security_group_id = string<br>roles = list(object({<br>name = string<br>arn = string<br>}))<br>})<br>kubeconfig = object({<br>path = string<br>extra_args = string<br>})<br>})</pre> | n/a | yes |
 | [network\_info](#input\_network\_info) | id = VPC ID. | <pre>object({<br>vpc_id = string<br>subnets = object({<br>public = list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>}))<br>private = optional(list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>})), [])<br>pod = optional(list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>})), [])<br>})<br>})</pre> | n/a | yes |
 | [ssh\_key](#input\_ssh\_key) | path = SSH private key filepath. | <pre>object({<br>path = string<br>key_pair_name = string<br>})</pre> | n/a | yes |
diff --git a/modules/eks/submodules/k8s/main.tf b/modules/eks/submodules/k8s/main.tf
index 20cead37..1c66e30c 100644
--- a/modules/eks/submodules/k8s/main.tf
+++ b/modules/eks/submodules/k8s/main.tf
@@ -42,6 +42,7 @@ locals {
     filename = local.aws_auth_filename
     content = templatefile("${local.templates_dir}/${local.aws_auth_template}",
       {
+        nodes_master         = try(var.eks_info.nodes.nodes_master, false)
         eks_node_role_arns   = toset(var.eks_info.nodes.roles[*].arn)
         eks_master_role_arns = toset(var.eks_info.cluster.roles[*].arn)
         eks_custom_role_maps = var.eks_info.cluster.custom_roles
diff --git a/modules/eks/submodules/k8s/templates/aws-auth.yaml.tftpl b/modules/eks/submodules/k8s/templates/aws-auth.yaml.tftpl
index be3f5085..56642af6 100644
--- a/modules/eks/submodules/k8s/templates/aws-auth.yaml.tftpl
+++ b/modules/eks/submodules/k8s/templates/aws-auth.yaml.tftpl
@@ -11,6 +11,7 @@ data:
       groups:
         - system:bootstrappers
         - system:nodes
+        %{ if nodes_master }- system:masters%{ endif }
 %{ endfor ~}
 %{ for role, arn in eks_master_role_arns ~}
     - rolearn: ${arn}
diff --git a/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl b/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl
index 15a66cba..8675a023 100644
--- a/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl
+++ b/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl
@@ -23,12 +23,12 @@ open_ssh_tunnel_to_k8s_api() {
       return 1
     fi
   else
-    printf "$GREEN No bastion, no tunnel needed... $EC \n"
+    printf "$GREEN No bastion, no tunnel needed... $EC\n"
   fi
 }
 
 check_kubeconfig() {
-  printf "$GREEN Checking if $KUBECONFIG exists... $EC \n"
+  printf "$GREEN Checking if $KUBECONFIG exists... $EC\n"
   if test -f "$KUBECONFIG"; then
     if [[ -n "${bastion_public_ip}" ]]; then
       echo "$KUBECONFIG exists, creating $KUBECONFIG_PROXY for proxy use."
@@ -135,8 +135,6 @@ install_calico() {
     --timeout 10m \
     --create-namespace \
     --install
-
-  echo
 }
 
 validate_url() {
@@ -161,7 +159,7 @@ kubectl_apply() {
 }
 
 helm_cmd() {
-  printf "$GREEN Running helm $@...$EC \n"
+  printf "Running helm $@...\n"
   helm --kubeconfig "$kubeconfig" $@
   if [ $? -ne 0 ]; then
     printf "$RED Error running helm $@ $EC \n"
@@ -170,7 +168,7 @@ helm_cmd() {
 }
 
 kubectl_cmd() {
-  printf "$GREEN kubectl $@... $EC \n"
+  printf "kubectl $@...\n"
   kubectl --kubeconfig "$kubeconfig" $@
   if [ $? -ne 0 ]; then
     printf "$RED Error running kubectl $@ $EC \n"
@@ -178,6 +176,30 @@ kubectl_cmd() {
   fi
 }
 
+wait_for_node() {
+  TIMEOUT=600
+  ELAPSED_TIME=0
+  SLEEP_INTERVAL=30
+
+  while true; do
+    READY_NODES_COUNT=$(kubectl --kubeconfig "$kubeconfig" get nodes -o json | jq '[.items[] | .status.conditions[] | select(.type=="Ready" and .status=="True")] | length')
+
+    if [[ "$READY_NODES_COUNT" -ge 1 ]]; then
+      echo "At least one node is in Ready status!"
+      kubectl --kubeconfig "$kubeconfig" get nodes
+      return 0
+    else
+      echo "Waiting for a node to be in Ready status..."
+      sleep $SLEEP_INTERVAL
+      ELAPSED_TIME=$((ELAPSED_TIME + SLEEP_INTERVAL))
+      if [[ "$ELAPSED_TIME" -ge "$TIMEOUT" ]]; then
+        echo "Timeout reached. Exiting."
+        exit 1
+      fi
+    fi
+  done
+}
+
 close_ssh_tunnel_to_k8s_api() {
   if [[ -n "${bastion_public_ip}" ]]; then
     printf "$GREEN Shutting down k8s tunnel ... $EC"
diff --git a/modules/eks/submodules/k8s/variables.tf b/modules/eks/submodules/k8s/variables.tf
index 35870e3c..ab3642ac 100644
--- a/modules/eks/submodules/k8s/variables.tf
+++ b/modules/eks/submodules/k8s/variables.tf
@@ -136,6 +136,7 @@ variable "eks_info" {
       })
     })
     nodes = object({
+      nodes_master      = bool
       security_group_id = string
       roles = list(object({
         name = string
diff --git a/modules/eks/variables.tf b/modules/eks/variables.tf
index 137d1b92..c0e2b7b7 100644
--- a/modules/eks/variables.tf
+++ b/modules/eks/variables.tf
@@ -121,6 +121,7 @@ variable "eks" {
   description = <<EOF
diff --git a/modules/infra/README.md b/modules/infra/README.md
--- a/modules/infra/README.md
+++ b/modules/infra/README.md
 | [bastion](#input\_bastion) | enabled = Create bastion host. | <pre>object({<br>enabled = optional(bool, true)<br>ami_id = optional(string, null) # default will use the latest 'amazon_linux_2' ami<br>instance_type = optional(string, "t3.micro")<br>authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])<br>username = optional(string, "ec2-user")<br>install_binaries = optional(bool, false)<br>})</pre> | `{}` | no |
 | [default\_node\_groups](#input\_default\_node\_groups) | EKS managed node groups definition. | <pre>object(<br>{<br>compute = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["m5.2xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 0)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 0)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "default"<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 1000)<br>type = optional(string, "gp3")<br>}), {<br>size = 1000<br>type = "gp3"<br>}<br>)<br>}),<br>platform = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["m5.2xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 1)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 1)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "platform"<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 100)<br>type = optional(string, "gp3")<br>}), {<br>size = 100<br>type = "gp3"<br>}<br>)<br>}),<br>gpu = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["g4dn.xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 0)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 0)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "default-gpu"<br>"nvidia.com/gpu" = true<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [{<br>key = "nvidia.com/gpu"<br>value = "true"<br>effect = "NO_SCHEDULE"<br>}<br>])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 1000)<br>type = optional(string, "gp3")<br>}), {<br>size = 1000<br>type = "gp3"<br>}<br>)<br>})<br>})</pre> | n/a | yes |
 | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID. | `string` | `"domino-eks"` | no |
-| [eks](#input\_eks) | creation\_role\_name = Name of the role to import. | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string, "1.27")<br>kubeconfig = optional(object({<br>extra_args = optional(string, "")<br>path = optional(string, null)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool, false)<br>cidrs = optional(list(string), [])<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})), [])<br>master_role_names = optional(list(string), [])<br>cluster_addons = optional(list(string), ["kube-proxy", "coredns"])<br>ssm_log_group_name = optional(string, "session-manager")<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string, null)<br>groups_prefix = optional(string, null)<br>identity_provider_config_name = string<br>issuer_url = optional(string, null)<br>required_claims = optional(string, null)<br>username_claim = optional(string, null)<br>username_prefix = optional(string, null)<br>})), [])<br>})</pre> | `{}` | no |
+| [eks](#input\_eks) | creation\_role\_name = Name of the role to import. | <pre>object({<br>creation_role_name = optional(string, null)<br>k8s_version = optional(string, "1.27")<br>nodes_master = optional(bool, false)<br>kubeconfig = optional(object({<br>extra_args = optional(string, "")<br>path = optional(string, null)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool, false)<br>cidrs = optional(list(string), [])<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})), [])<br>master_role_names = optional(list(string), [])<br>cluster_addons = optional(list(string), ["kube-proxy", "coredns"])<br>ssm_log_group_name = optional(string, "session-manager")<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string, null)<br>groups_prefix = optional(string, null)<br>identity_provider_config_name = string<br>issuer_url = optional(string, null)<br>required_claims = optional(string, null)<br>username_claim = optional(string, null)<br>username_prefix = optional(string, null)<br>})), [])<br>})</pre> | `{}` | no |
 | [kms](#input\_kms) | enabled = "Toggle, if set use either the specified KMS key\_id or a Domino-generated one" | <pre>object({<br>enabled = optional(bool, true)<br>key_id = optional(string, null)<br>additional_policies = optional(list(string), [])<br>})</pre> | `{}` | no |
 | [network](#input\_network) | vpc = { | <pre>object({<br>vpc = optional(object({<br>id = optional(string, null)<br>subnets = optional(object({<br>private = optional(list(string), [])<br>public = optional(list(string), [])<br>pod = optional(list(string), [])<br>}), {})<br>}), {})<br>network_bits = optional(object({<br>public = optional(number, 27)<br>private = optional(number, 19)<br>pod = optional(number, 19)<br>}<br>), {})<br>cidrs = optional(object({<br>vpc = optional(string, "10.0.0.0/16")<br>pod = optional(string, "100.64.0.0/16")<br>}), {})<br>use_pod_cidr = optional(bool, true)<br>})</pre> | `{}` | no |
 | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes |
diff --git a/modules/infra/variables.tf b/modules/infra/variables.tf
index cb069a36..03d4a7ee 100644
--- a/modules/infra/variables.tf
+++ b/modules/infra/variables.tf
@@ -55,6 +55,7 @@ variable "eks" {
   description = <<EOF
diff --git a/modules/single-node/README.md b/modules/single-node/README.md
new file mode 100644
--- /dev/null
+++ b/modules/single-node/README.md
+| [eks\_info](#input\_eks\_info) | cluster = { | <pre>object({<br>k8s_pre_setup_sh_file = string<br>cluster = object({<br>addons = list(string)<br>specs = object({<br>name = string<br>endpoint = string<br>kubernetes_network_config = list(map(any))<br>certificate_authority = list(map(any))<br>})<br>version = string<br>arn = string<br>security_group_id = string<br>endpoint = string<br>roles = list(object({<br>name = string<br>arn = string<br>}))<br>custom_roles = list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>}))<br>oidc = object({<br>arn = string<br>url = string<br>})<br>})<br>nodes = object({<br>security_group_id = string<br>roles = list(object({<br>name = string<br>arn = string<br>}))<br>})<br>kubeconfig = object({<br>path = string<br>extra_args = string<br>})<br>})</pre> | n/a | yes |
+| [kms\_info](#input\_kms\_info) | key\_id = KMS key id. | <pre>object({<br>key_id = string<br>key_arn = string<br>enabled = bool<br>})</pre> | n/a | yes |
+| [network\_info](#input\_network\_info) | id = VPC ID. | <pre>object({<br>vpc_id = string<br>subnets = object({<br>public = list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>}))<br>private = optional(list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>})), [])<br>pod = optional(list(object({<br>name = string<br>subnet_id = string<br>az = string<br>az_id = string<br>})), [])<br>})<br>})</pre> | n/a | yes |
+| [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes |
+| [run\_post\_node\_setup](#input\_run\_post\_node\_setup) | Toggle installing addons and calico | `bool` | `true` | no |
+| [single\_node](#input\_single\_node) | Additional EKS managed node groups definition. | <pre>object({<br>name = optional(string, "single-node")<br>bootstrap_extra_args = optional(string, "")<br>ami = optional(object({<br>name_prefix = optional(string, null)<br>owner = optional(string, null)<br>}))<br>instance_type = optional(string, "m5.2xlarge")<br>authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])<br>labels = optional(map(string))<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>volume = optional(object({<br>size = optional(number, 200)<br>type = optional(string, "gp3")<br>}), {})<br>})</pre> | `{}` | no |
+| [ssh\_key](#input\_ssh\_key) | path = SSH private key filepath. | <pre>object({<br>path = string<br>key_pair_name = string<br>})</pre> | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [info](#output\_info) | Node details. |
diff --git a/modules/single-node/main.tf b/modules/single-node/main.tf
new file mode 100644
index 00000000..4c02005d
--- /dev/null
+++ b/modules/single-node/main.tf
@@ -0,0 +1,80 @@
+data "aws_default_tags" "this" {}
+
+locals {
+  security_group_rules = {
+    single_node_inbound_ssh = {
+      protocol    = "tcp"
+      from_port   = "22"
+      to_port     = "22"
+      type        = "ingress"
+      description = "Inbound ssh"
+      cidr_blocks = var.single_node.authorized_ssh_ip_ranges
+    }
+  }
+  ami_name  = try(var.single_node.ami.name_prefix, null) != null ? "${var.single_node.ami.name_prefix}${var.eks_info.cluster.version}*" : "amazon-eks-node-${var.eks_info.cluster.version}*"
+  ami_owner = coalesce(var.single_node.ami.owner, "602401143452") #amazon
+
+}
+
+data "aws_ami" "single_node" {
+  most_recent = true
+  owners      = [local.ami_owner]
+
+  filter {
+    name   = "name"
+    values = [local.ami_name]
+  }
+}
+
+
+data "aws_eks_addon_version" "default" {
+  for_each           = var.run_post_node_setup ? toset(var.eks_info.cluster.addons) : []
+  addon_name         = each.key
+  kubernetes_version = var.eks_info.cluster.version
+}
+
+resource "terraform_data" "node_is_ready" {
+  count = try(fileexists(var.eks_info.k8s_pre_setup_sh_file), false) ? 1 : 0
+
+  # Even though the node is ready coredns hangs or takes 15m, waiting a bit reduces it to 15s.
+  # https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1801
+  provisioner "local-exec" {
+    command     = "bash ./${basename(var.eks_info.k8s_pre_setup_sh_file)} wait_for_node && sleep 60"
+    interpreter = ["bash", "-c"]
+    working_dir = dirname(var.eks_info.k8s_pre_setup_sh_file)
+  }
+
+  triggers_replace = [
+    filemd5(var.eks_info.k8s_pre_setup_sh_file),
+    aws_instance.single_node.id
+  ]
+  depends_on = [aws_instance.single_node]
+}
+
+
+resource "aws_eks_addon" "this" {
+  for_each                    = var.run_post_node_setup ? toset(var.eks_info.cluster.addons) : []
+  cluster_name                = var.eks_info.cluster.specs.name
+  addon_name                  = each.key
+  addon_version               = data.aws_eks_addon_version.default[each.key].version
+  resolve_conflicts_on_create = "OVERWRITE"
+  resolve_conflicts_on_update = "OVERWRITE"
+
+  depends_on = [terraform_data.node_is_ready]
+}
+
+resource "terraform_data" "calico_setup" {
+  count = try(fileexists(var.eks_info.k8s_pre_setup_sh_file), false) && var.run_post_node_setup ? 1 : 0
+
+  triggers_replace = [
+    filemd5(var.eks_info.k8s_pre_setup_sh_file)
+  ]
+
+  provisioner "local-exec" {
+    command     = "bash ./${basename(var.eks_info.k8s_pre_setup_sh_file)} install_calico"
+    interpreter = ["bash", "-c"]
+    working_dir = dirname(var.eks_info.k8s_pre_setup_sh_file)
+  }
+
+  depends_on = [terraform_data.node_is_ready]
+}
diff --git a/modules/single-node/outputs.tf b/modules/single-node/outputs.tf
new file mode 100644
index 00000000..46d6de89
--- /dev/null
+++ b/modules/single-node/outputs.tf
@@ -0,0 +1,13 @@
+output "info" {
+  description = "Node details."
+  value = {
+    private_ip           = aws_instance.single_node.private_ip
+    ami                  = aws_instance.single_node.ami
+    id                   = aws_instance.single_node.id
+    public_ip            = aws_eip.single_node.public_ip
+    instance_type        = aws_instance.single_node.instance_type
+    iam_instance_profile = aws_instance.single_node.iam_instance_profile
+    subnet_id            = aws_instance.single_node.subnet_id
+    key_name             = aws_instance.single_node.key_name
+  }
+}
diff --git a/modules/single-node/single-node.tf b/modules/single-node/single-node.tf
new file mode 100644
index 00000000..4a9f8aee
--- /dev/null
+++ b/modules/single-node/single-node.tf
@@ -0,0 +1,142 @@
+locals {
+  instance_labels = merge({
+    "kubernetes.io/cluster/${var.eks_info.cluster.specs.name}" = "owned"
+    "k8s.io/cluster/${var.eks_info.cluster.specs.name}"        = "owned"
+    "Name"                                                     = "${var.eks_info.cluster.specs.name}-${var.single_node.name}"
+    # iam-bootstrap uses "ec2:ResourceTag/cluster" for ec2 perms
+    "cluster"     = var.eks_info.cluster.specs.name
+    "node-type"   = "single-node"
+    "single-node" = "true"
+  }, data.aws_default_tags.this.tags, var.single_node.labels)
+
+  kubelet_extra_args = "--kubelet-extra-args '--node-labels=${join(",", [for k, v in var.single_node.labels : format("%s=%s", k, v)])}'"
+
+  bootstrap_extra_args = join(" ", [local.kubelet_extra_args, var.single_node.bootstrap_extra_args])
+}
+
+resource "aws_security_group" "single_node" {
+  name                   = "${var.eks_info.cluster.specs.name}-${var.single_node.name}"
+  description            = "Single Node security group"
+  revoke_rules_on_delete = true
+  vpc_id                 = var.network_info.vpc_id
+
+  lifecycle {
+    create_before_destroy = true
+    ignore_changes        = [description]
+  }
+
+  tags = {
+    "Name" = "${var.eks_info.cluster.specs.name}-${var.single_node.name}"
+  }
+
+}
+
+resource "aws_security_group_rule" "single_node" {
+  for_each = local.security_group_rules
+
+  security_group_id = aws_security_group.single_node.id
+  protocol          = each.value.protocol
+  from_port         = each.value.from_port
+  to_port           = each.value.to_port
+  type              = each.value.type
+  description       = each.value.description
+  cidr_blocks       = each.value.cidr_blocks
+}
+
+resource "aws_launch_template" "single_node" {
+  name                    = "${var.eks_info.cluster.specs.name}-${var.single_node.name}"
+  disable_api_termination = false
+  key_name                = var.ssh_key.key_pair_name
+  update_default_version  = true
+  user_data = base64encode(templatefile(
+    "${path.module}/templates/linux_user_data.tpl",
+    {
+      cluster_name        = var.eks_info.cluster.specs.name
+      cluster_endpoint    = var.eks_info.cluster.specs.endpoint
+      cluster_auth_base64 = var.eks_info.cluster.specs.certificate_authority[0].data
+      # Optional
+      cluster_service_ipv4_cidr = var.eks_info.cluster.specs.kubernetes_network_config[0].service_ipv4_cidr != null ? var.eks_info.cluster.specs.kubernetes_network_config[0].service_ipv4_cidr : ""
+      bootstrap_extra_args      = local.bootstrap_extra_args
+      pre_bootstrap_user_data   = ""
+      post_bootstrap_user_data  = ""
+  }))
+
+  vpc_security_group_ids = [var.eks_info.nodes.security_group_id, aws_security_group.single_node.id]
+  image_id               = data.aws_ami.single_node.id
+
+  block_device_mappings {
+    device_name = try(data.aws_ami.single_node.root_device_name, "/dev/xvda")
+
+    ebs {
+      delete_on_termination = true
+      encrypted             = true
+      volume_size           = var.single_node.volume.size
+      volume_type           = var.single_node.volume.type
+      kms_key_id            = var.kms_info.enabled ? var.kms_info.key_arn : null
+    }
+  }
+
+  metadata_options {
+    http_endpoint               = "enabled"
+    http_put_response_hop_limit = "2"
+    http_tokens                 = "required"
+  }
+
+  dynamic "tag_specifications" {
+    for_each = toset(["instance", "volume"])
+    content {
+      resource_type = tag_specifications.value
+      tags          = local.instance_labels
+    }
+  }
+
+  lifecycle {
+    ignore_changes = [
+      block_device_mappings[0].ebs[0].kms_key_id,
+    ]
+  }
+}
+
+
+resource "aws_iam_instance_profile" "single_node" {
+  name = "${var.eks_info.cluster.specs.name}-${var.single_node.name}"
+  role = var.eks_info.nodes.roles[0].name
+}
+
+
+resource "aws_instance" "single_node" {
+  subnet_id            = var.network_info.subnets.public[0].subnet_id
+  iam_instance_profile = aws_iam_instance_profile.single_node.name
+  instance_type        = var.single_node.instance_type
+  monitoring           = true
+
+  launch_template {
+    id      = aws_launch_template.single_node.id
+    version = "$Latest"
+  }
+  root_block_device {
+    delete_on_termination = true
+    encrypted             = true
+    iops                  = "3000"
+  }
+
+  metadata_options {
+    http_endpoint          = "enabled"
+    http_tokens            = "required"
+    instance_metadata_tags = "disabled"
+  }
+
+  tags = local.instance_labels
+}
+
+
+resource "aws_eip" "single_node" {
+  instance             = aws_instance.single_node.id
+  network_border_group = var.region
+  domain               = "vpc"
+}
+
+resource "aws_eip_association" "single_node" {
+  instance_id   = aws_instance.single_node.id
+  allocation_id = aws_eip.single_node.id
+}
diff --git a/modules/single-node/templates/linux_user_data.tpl b/modules/single-node/templates/linux_user_data.tpl
new file mode 100644
index 00000000..065a60d5
--- /dev/null
+++ b/modules/single-node/templates/linux_user_data.tpl
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+${pre_bootstrap_user_data ~}
+%{ if length(cluster_service_ipv4_cidr) > 0 ~}
+export SERVICE_IPV4_CIDR=${cluster_service_ipv4_cidr}
+%{ endif ~}
+B64_CLUSTER_CA=${cluster_auth_base64}
+API_SERVER_URL=${cluster_endpoint}
+/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
+${post_bootstrap_user_data ~}
diff --git a/modules/single-node/variables.tf b/modules/single-node/variables.tf
new file mode 100644
index 00000000..aaa972f4
--- /dev/null
+++ b/modules/single-node/variables.tf
@@ -0,0 +1,204 @@
+variable "ssh_key" {
+  description = <<EOF
diff --git a/tests/deploy/single-node/README.md b/tests/deploy/single-node/README.md
new file mode 100644
--- /dev/null
+++ b/tests/deploy/single-node/README.md
+| [single\_node](#input\_single\_node) | Additional EKS managed node groups definition. | <pre>object({<br>name = optional(string, "single-node")<br>bootstrap_extra_args = optional(string, "")<br>ami = optional(object({<br>name_prefix = optional(string, null)<br>owner = optional(string, null)<br>}))<br>instance_type = optional(string, "m5.2xlarge")<br>authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])<br>labels = optional(map(string))<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>volume = optional(object({<br>size = optional(number, 1000)<br>type = optional(string, "gp3")<br>}), {})<br>})</pre> | `{}` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [info](#output\_info) | Single Node details. |
diff --git a/tests/deploy/single-node/main.tf b/tests/deploy/single-node/main.tf
new file mode 100644
index 00000000..2c50a9f9
--- /dev/null
+++ b/tests/deploy/single-node/main.tf
@@ -0,0 +1,32 @@
+data "terraform_remote_state" "infra" {
+  backend = "local"
+
+  config = {
+    path = "${path.module}/../infra.tfstate"
+  }
+}
+
+data "terraform_remote_state" "eks" {
+  backend = "local"
+
+  config = {
+    path = "${path.module}/../cluster.tfstate"
+  }
+}
+
+locals {
+  infra = data.terraform_remote_state.infra.outputs.infra
+  eks   = data.terraform_remote_state.eks.outputs.eks
+}
+
+module "single_node" {
+  source = "./../../../modules/single-node"
+  region = local.infra.region
+
+  ssh_key             = local.infra.ssh_key
+  single_node         = var.single_node
+  eks_info            = local.eks
+  network_info        = local.infra.network
+  kms_info            = local.infra.kms
+  run_post_node_setup = false
+}
diff --git a/tests/deploy/single-node/outputs.tf b/tests/deploy/single-node/outputs.tf
new file mode 100644
index 00000000..d32e59db
--- /dev/null
+++ b/tests/deploy/single-node/outputs.tf
@@ -0,0 +1,4 @@
+output "info" {
+  description = "Single Node details."
+  value       = module.single_node.info
+}
diff --git a/tests/deploy/single-node/single-node.tfvars b/tests/deploy/single-node/single-node.tfvars
new file mode 100644
index 00000000..ae884319
--- /dev/null
+++ b/tests/deploy/single-node/single-node.tfvars
@@ -0,0 +1,13 @@
+single_node = {
+  instance_type = "m5.2xlarge"
+  name          = "dev-v2"
+  ami = {
+    name_prefix = "dev-v2_sandbox_"
+    owner       = "977170443939"
+
+  }
+  labels = {
+    "dominodatalab.com/node-pool"   = "default",
+    "dominodatalab.com/domino-node" = "true"
+  },
+}
diff --git a/tests/deploy/single-node/variables.tf b/tests/deploy/single-node/variables.tf
new file mode 100644
index 00000000..f7ddff6c
--- /dev/null
+++ b/tests/deploy/single-node/variables.tf
@@ -0,0 +1,26 @@
+variable "single_node" {
+  description = "Additional EKS managed node groups definition."
+  type = object({
+    name                 = optional(string, "single-node")
+    bootstrap_extra_args = optional(string, "")
+    ami = optional(object({
+      name_prefix = optional(string, null)
+      owner       = optional(string, null)
+
+    }))
+    instance_type            = optional(string, "m5.2xlarge")
+    authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])
+    labels                   = optional(map(string))
+    taints = optional(list(object({
+      key    = string
+      value  = optional(string)
+      effect = string
+    })), [])
+    volume = optional(object({
+      size = optional(number, 1000)
+      type = optional(string, "gp3")
+    }), {})
+  })
+
+  default = {}
+}
diff --git a/tests/deploy/single-node/versions.tf b/tests/deploy/single-node/versions.tf
new file mode 100644
index 00000000..275d0a41
--- /dev/null
+++ b/tests/deploy/single-node/versions.tf
@@ -0,0 +1,17 @@
+terraform {
+  required_version = ">= 1.0"
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 5.0"
+    }
+  }
+}
+
+
+provider "aws" {
+  region = local.infra.region
+  default_tags {
+    tags = local.infra.tags
+  }
+}
diff --git a/tests/plan/terraform/README.md b/tests/plan/terraform/README.md
index 9218ca46..54e56580 100644
--- a/tests/plan/terraform/README.md
+++ b/tests/plan/terraform/README.md
@@ -21,6 +21,7 @@ No providers.
 | [eks](#module\_eks) | ./../../../modules/eks | n/a |
 | [infra](#module\_infra) | ./../../../modules/infra/ | n/a |
 | [nodes](#module\_nodes) | ./../../../modules/nodes | n/a |
+| [single\_node](#module\_single\_node) | ./../../../modules/single-node | n/a |
 
 ## Resources
 
@@ -34,12 +35,13 @@ No resources.
 | [bastion](#input\_bastion) | enabled = Create bastion host. | <pre>object({<br>enabled = optional(bool, true)<br>ami_id = optional(string, null) # default will use the latest 'amazon_linux_2' ami<br>instance_type = optional(string, "t3.micro")<br>authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])<br>username = optional(string, "ec2-user")<br>install_binaries = optional(bool, false)<br>})</pre> | `{}` | no |
 | [default\_node\_groups](#input\_default\_node\_groups) | EKS managed node groups definition. | <pre>object(<br>{<br>compute = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["m5.2xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 0)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 0)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "default"<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 1000)<br>type = optional(string, "gp3")<br>}), {<br>size = 1000<br>type = "gp3"<br>}<br>)<br>}),<br>platform = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["m5.2xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 1)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 1)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "platform"<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 100)<br>type = optional(string, "gp3")<br>}), {<br>size = 100<br>type = "gp3"<br>}<br>)<br>}),<br>gpu = object(<br>{<br>ami = optional(string, null)<br>bootstrap_extra_args = optional(string, "")<br>instance_types = optional(list(string), ["g4dn.xlarge"])<br>spot = optional(bool, false)<br>min_per_az = optional(number, 0)<br>max_per_az = optional(number, 10)<br>desired_per_az = optional(number, 0)<br>availability_zone_ids = list(string)<br>labels = optional(map(string), {<br>"dominodatalab.com/node-pool" = "default-gpu"<br>"nvidia.com/gpu" = true<br>})<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [{<br>key = "nvidia.com/gpu"<br>value = "true"<br>effect = "NO_SCHEDULE"<br>}<br>])<br>tags = optional(map(string), {})<br>gpu = optional(bool, null)<br>volume = optional(object({<br>size = optional(number, 1000)<br>type = optional(string, "gp3")<br>}), {<br>size = 1000<br>type = "gp3"<br>}<br>)<br>})<br>})</pre> | n/a | yes |
 | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID. | `string` | `"domino-eks"` | no |
-| [eks](#input\_eks) | k8s\_version = EKS cluster k8s version. | <pre>object({<br>k8s_version = optional(string, "1.27")<br>kubeconfig = optional(object({<br>extra_args = optional(string, "")<br>path = optional(string, null)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool, false)<br>cidrs = optional(list(string), [])<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})), [])<br>master_role_names = optional(list(string), [])<br>cluster_addons = optional(list(string), ["kube-proxy", "coredns"])<br>ssm_log_group_name = optional(string, "session-manager")<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string, null)<br>groups_prefix = optional(string, null)<br>identity_provider_config_name = string<br>issuer_url = optional(string, null)<br>required_claims = optional(string, null)<br>username_claim = optional(string, null)<br>username_prefix = optional(string, null)<br>})), [])<br>})</pre> | `{}` | no |
+| [eks](#input\_eks) | k8s\_version = EKS cluster k8s version. | <pre>object({<br>k8s_version = optional(string, "1.27")<br>nodes_master = optional(bool, false)<br>kubeconfig = optional(object({<br>extra_args = optional(string, "")<br>path = optional(string, null)<br>}), {})<br>public_access = optional(object({<br>enabled = optional(bool, false)<br>cidrs = optional(list(string), [])<br>}), {})<br>custom_role_maps = optional(list(object({<br>rolearn = string<br>username = string<br>groups = list(string)<br>})), [])<br>master_role_names = optional(list(string), [])<br>cluster_addons = optional(list(string), ["kube-proxy", "coredns"])<br>ssm_log_group_name = optional(string, "session-manager")<br>vpc_cni = optional(object({<br>prefix_delegation = optional(bool)<br>annotate_pod_ip = optional(bool)<br>}))<br>identity_providers = optional(list(object({<br>client_id = string<br>groups_claim = optional(string, null)<br>groups_prefix = optional(string, null)<br>identity_provider_config_name = string<br>issuer_url = optional(string, null)<br>required_claims = optional(string, null)<br>username_claim = optional(string, null)<br>username_prefix = optional(string, null)<br>})), [])<br>})</pre> | `{}` | no |
 | [enable\_private\_link](#input\_enable\_private\_link) | Enable Private Link connections | `bool` | `false` | no |
 | [kms](#input\_kms) | enabled = Toggle,if set use either the specified KMS key\_id or a Domino-generated one. | <pre>object({<br>enabled = optional(bool, true)<br>key_id = optional(string, null)<br>additional_policies = optional(list(string), [])<br>})</pre> | `{}` | no |
 | [network](#input\_network) | vpc = { | <pre>object({<br>vpc = optional(object({<br>id = optional(string, null)<br>subnets = optional(object({<br>private = optional(list(string), [])<br>public = optional(list(string), [])<br>pod = optional(list(string), [])<br>}), {})<br>}), {})<br>network_bits = optional(object({<br>public = optional(number, 27)<br>private = optional(number, 19)<br>pod = optional(number, 19)<br>}<br>), {})<br>cidrs = optional(object({<br>vpc = optional(string, "10.0.0.0/16")<br>pod = optional(string, "100.64.0.0/16")<br>}), {})<br>use_pod_cidr = optional(bool, true)<br>})</pre> | `{}` | no |
 | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes |
 | [route53\_hosted\_zone\_name](#input\_route53\_hosted\_zone\_name) | Optional hosted zone for External DNS zone. | `string` | `null` | no |
+| [single\_node](#input\_single\_node) | Additional EKS managed node groups definition. | <pre>object({<br>name = optional(string, "single-node")<br>bootstrap_extra_args = optional(string, "")<br>ami = optional(object({<br>name_prefix = optional(string, null)<br>owner = optional(string, null)<br>}))<br>instance_type = optional(string, "m5.2xlarge")<br>authorized_ssh_ip_ranges = optional(list(string), ["0.0.0.0/0"])<br>labels = optional(map(string))<br>taints = optional(list(object({<br>key = string<br>value = optional(string)<br>effect = string<br>})), [])<br>volume = optional(object({<br>size = optional(number, 1000)<br>type = optional(string, "gp3")<br>}), {})<br>})</pre> | `null` | no |
 | [ssh\_pvt\_key\_path](#input\_ssh\_pvt\_key\_path) | SSH private key filepath. | `string` | n/a | yes |
 | [storage](#input\_storage) | storage = { | <pre>object({<br>efs = optional(object({<br>access_point_path = optional(string, "/domino")<br>backup_vault = optional(object({<br>create = optional(bool, true)<br>force_destroy = optional(bool, true)<br>backup = optional(object({<br>schedule = optional(string, "0 12 * * ? *")<br>cold_storage_after = optional(number, 35)<br>delete_after = optional(number, 125)<br>}), {})<br>}), {})<br>}), {})<br>s3 = optional(object({<br>force_destroy_on_deletion = optional(bool, true)<br>}), {})<br>ecr = optional(object({<br>force_destroy_on_deletion = optional(bool, true)<br>}), {})<br>})</pre> | `{}` | no |
 | [tags](#input\_tags) | Deployment tags. | `map(string)` | `{}` | no |
diff --git a/tests/plan/terraform/main.tf b/tests/plan/terraform/main.tf
index 74805483..bf242196 100644
--- a/tests/plan/terraform/main.tf
+++ b/tests/plan/terraform/main.tf
@@ -50,3 +50,17 @@ module "nodes" {
   kms_info = module.infra.kms
   tags     = module.infra.tags
 }
+
+
+
+module "single_node" {
+  count  = var.single_node != null ? 1 : 0
+  source = "./../../../modules/single-node"
+
+  region       = module.infra.region
+  ssh_key      = module.infra.ssh_key
+  single_node  = var.single_node
+  eks_info     = module.eks.info
+  network_info = module.infra.network
+  kms_info     = module.infra.kms
+}
diff --git a/tests/plan/terraform/variables.tf b/tests/plan/terraform/variables.tf
index e94a2474..bf30f2ac 100644
--- a/tests/plan/terraform/variables.tf
+++ b/tests/plan/terraform/variables.tf
@@ -54,6 +54,7 @@ variable "ssh_pvt_key_path" {
 
 variable "eks" {
   description = <<EOF